Commit ec0bf39a authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (73 commits)
  [SCSI] aic79xx: Add ASC-29320LPE ids to driver
  [SCSI] stex: version update
  [SCSI] stex: change wait loop code
  [SCSI] stex: add new device type support
  [SCSI] stex: update device id info
  [SCSI] stex: adjust default queue length
  [SCSI] stex: add value check in hard reset routine
  [SCSI] stex: fix controller_info command handling
  [SCSI] stex: fix biosparam calculation
  [SCSI] megaraid: fix MMIO casts
  [SCSI] tgt: fix undefined flush_dcache_page() problem
  [SCSI] libsas: better error handling in sas_expander.c
  [SCSI] lpfc 8.1.11 : Change version number to 8.1.11
  [SCSI] lpfc 8.1.11 : Misc Fixes
  [SCSI] lpfc 8.1.11 : Add soft_wwnn sysfs attribute, rename soft_wwn_enable
  [SCSI] lpfc 8.1.11 : Removed decoding of PCI Subsystem Id
  [SCSI] lpfc 8.1.11 : Add MSI (Message Signalled Interrupts) support
  [SCSI] lpfc 8.1.11 : Adjust LOG_FCP logging
  [SCSI] lpfc 8.1.11 : Fix Memory leaks
  [SCSI] lpfc 8.1.11 : Fix lpfc_multi_ring_support
  ...
@@ -1416,6 +1416,11 @@ and is between 256 and 4096 characters. It is defined in the file
	scsi_logging=	[SCSI]
scsi_mod.scan= [SCSI] sync (default) scans SCSI busses as they are
discovered. async scans them in kernel threads,
allowing boot to proceed. none ignores them, expecting
user space to do the scan.
	selinux		[SELINUX] Disable or enable SELinux at boot time.
			Format: { "0" | "1" }
			See security/selinux/Kconfig help text.
......
@@ -375,7 +375,6 @@ Summary:
   scsi_add_device - creates new scsi device (lu) instance
   scsi_add_host - perform sysfs registration and set up transport class
   scsi_adjust_queue_depth - change the queue depth on a SCSI device
-  scsi_assign_lock - replace default host_lock with given lock
   scsi_bios_ptable - return copy of block device's partition table
   scsi_block_requests - prevent further commands being queued to given host
   scsi_deactivate_tcq - turn off tag command queueing
@@ -488,20 +487,6 @@ void scsi_adjust_queue_depth(struct scsi_device * sdev, int tagged,
                             int tags)

-/**
- * scsi_assign_lock - replace default host_lock with given lock
- * @shost: a pointer to a scsi host instance
- * @lock: pointer to lock to replace host_lock for this host
- *
- * Returns nothing
- *
- * Might block: no
- *
- * Defined in: include/scsi/scsi_host.h .
- **/
-void scsi_assign_lock(struct Scsi_Host *shost, spinlock_t *lock)

/**
 * scsi_bios_ptable - return copy of block device's partition table
 * @dev:	pointer to block device
@@ -1366,17 +1351,11 @@ Locks
Each struct Scsi_Host instance has a spin_lock called struct
Scsi_Host::default_lock which is initialized in scsi_host_alloc() [found in
hosts.c]. Within the same function the struct Scsi_Host::host_lock pointer
-is initialized to point at default_lock with the scsi_assign_lock() function.
-Thereafter lock and unlock operations performed by the mid level use the
-struct Scsi_Host::host_lock pointer.
is initialized to point at default_lock. Thereafter lock and unlock
operations performed by the mid level use the struct Scsi_Host::host_lock
pointer. Previously drivers could override the host_lock pointer but
this is not allowed anymore.
-
-LLDs can override the use of struct Scsi_Host::default_lock by
-using scsi_assign_lock(). The earliest opportunity to do this would
-be in the detect() function after it has invoked scsi_register(). It
-could be replaced by a coarser grain lock (e.g. per driver) or a
-lock of equal granularity (i.e. per host). Using finer grain locks
-(e.g. per SCSI device) may be possible by juggling locks in
-queuecommand().
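As a point of reference, a minimal sketch of the locking model after this change, with illustrative function names (the host_lock assignment mirrors the scsi_host_alloc() hunk in hosts.c later in this merge; an LLD simply uses the pointer and no longer substitutes its own lock):

#include <linux/spinlock.h>
#include <scsi/scsi_host.h>

/* Done by the mid level in scsi_host_alloc(); shown only to illustrate that
 * host_lock now always points at the embedded default_lock. */
static void example_init_host_lock(struct Scsi_Host *shost)
{
	shost->host_lock = &shost->default_lock;
	spin_lock_init(shost->host_lock);
}

/* An LLD that needs the host lock just takes it through the pointer. */
static void example_lld_touch_host_state(struct Scsi_Host *shost)
{
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	/* ... manipulate state protected by the host lock ... */
	spin_unlock_irqrestore(shost->host_lock, flags);
}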
Autosense
=========
......
@@ -277,7 +277,7 @@ static int sg_io(struct file *file, request_queue_t *q,
	if (rq->bio)
		blk_queue_bounce(q, &rq->bio);

-	rq->timeout = (hdr->timeout * HZ) / 1000;
	rq->timeout = jiffies_to_msecs(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
......
@@ -622,8 +622,10 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
			dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
			/* restore the old result if the request sense was
			 * successful */
-			if(result == 0)
			if (result == 0)
				result = cmnd[7];
			/* restore the original length */
			SCp->cmd_len = cmnd[8];
		} else
			NCR_700_unmap(hostdata, SCp, slot);
@@ -1007,6 +1009,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
		 * of the command */
		cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
		cmnd[7] = hostdata->status[0];
		cmnd[8] = SCp->cmd_len;
		SCp->cmd_len = 6; /* command length for
				   * REQUEST_SENSE */
		slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
		slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
		slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
......
@@ -2186,21 +2186,21 @@ static int __init BusLogic_init(void)
	if (BusLogic_ProbeOptions.NoProbe)
		return -ENODEV;
-	BusLogic_ProbeInfoList = (struct BusLogic_ProbeInfo *)
-		kmalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_ATOMIC);
	BusLogic_ProbeInfoList =
		kzalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_KERNEL);
	if (BusLogic_ProbeInfoList == NULL) {
		BusLogic_Error("BusLogic: Unable to allocate Probe Info List\n", NULL);
		return -ENOMEM;
	}
-	memset(BusLogic_ProbeInfoList, 0, BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo));
-	PrototypeHostAdapter = (struct BusLogic_HostAdapter *)
-		kmalloc(sizeof(struct BusLogic_HostAdapter), GFP_ATOMIC);
	PrototypeHostAdapter =
		kzalloc(sizeof(struct BusLogic_HostAdapter), GFP_KERNEL);
	if (PrototypeHostAdapter == NULL) {
		kfree(BusLogic_ProbeInfoList);
		BusLogic_Error("BusLogic: Unable to allocate Prototype " "Host Adapter\n", NULL);
		return -ENOMEM;
	}
-	memset(PrototypeHostAdapter, 0, sizeof(struct BusLogic_HostAdapter));
#ifdef MODULE
	if (BusLogic != NULL)
		BusLogic_Setup(BusLogic);
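The BusLogic hunk above is the usual kmalloc()+memset() to kzalloc() conversion (with GFP_ATOMIC relaxed to GFP_KERNEL, presumably because module init may sleep). A minimal sketch of the pattern, using an illustrative structure and function name rather than the driver's own:

#include <linux/slab.h>

struct example_probe_info {
	int bus;
	int target;
};

static struct example_probe_info *example_alloc_probe_list(unsigned int n)
{
	/* kzalloc() returns zeroed memory, replacing the separate memset();
	 * GFP_KERNEL is fine here because the caller is allowed to sleep. */
	return kzalloc(n * sizeof(struct example_probe_info), GFP_KERNEL);
}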
......
@@ -29,6 +29,13 @@ config SCSI
	  However, do not compile this as a module if your root file system
	  (the one containing the directory /) is located on a SCSI device.

config SCSI_TGT
	tristate "SCSI target support"
	depends on SCSI && EXPERIMENTAL
	---help---
	  If you want to use SCSI target mode drivers enable this option.
	  If you choose M, the module will be called scsi_tgt.

config SCSI_NETLINK
	bool
	default	n
@@ -216,6 +223,23 @@ config SCSI_LOGGING
	  there should be no noticeable performance impact as long as you have
	  logging turned off.
config SCSI_SCAN_ASYNC
bool "Asynchronous SCSI scanning"
depends on SCSI
help
The SCSI subsystem can probe for devices while the rest of the
system continues booting, and even probe devices on different
busses in parallel, leading to a significant speed-up.
If you have built SCSI as modules, enabling this option can
be a problem as the devices may not have been found by the
time your system expects them to have been. You can load the
scsi_wait_scan module to ensure that all scans have completed.
If you build your SCSI drivers into the kernel, then everything
will work fine if you say Y here.
You can override this choice by specifying scsi_mod.scan="sync"
or "async" on the kernel's command line.
menu "SCSI Transports" menu "SCSI Transports"
depends on SCSI depends on SCSI
@@ -797,6 +821,20 @@ config SCSI_IBMVSCSI
	  To compile this driver as a module, choose M here: the
	  module will be called ibmvscsic.
config SCSI_IBMVSCSIS
tristate "IBM Virtual SCSI Server support"
depends on PPC_PSERIES && SCSI_TGT && SCSI_SRP
help
This is the SRP target driver for IBM pSeries virtual environments.
The userspace component needed to initialize the driver and
documentation can be found:
http://stgt.berlios.de/
To compile this driver as a module, choose M here: the
module will be called ibmvstgt.
config SCSI_INITIO
	tristate "Initio 9100U(W) support"
	depends on PCI && SCSI
@@ -944,8 +982,13 @@ config SCSI_STEX
	tristate "Promise SuperTrak EX Series support"
	depends on PCI && SCSI
	---help---
-	  This driver supports Promise SuperTrak EX8350/8300/16350/16300
-	  Storage controllers.
	  This driver supports Promise SuperTrak EX series storage controllers.

	  Promise provides Linux RAID configuration utility for these
	  controllers. Please visit <http://www.promise.com> to download.

	  To compile this driver as a module, choose M here: the
	  module will be called stex.

config SCSI_SYM53C8XX_2
	tristate "SYM53C8XX Version 2 SCSI support"
@@ -1026,6 +1069,7 @@ config SCSI_IPR
config SCSI_IPR_TRACE
	bool "enable driver internal trace"
	depends on SCSI_IPR
	default y
	help
	  If you say Y here, the driver will trace all commands issued
	  to the adapter. Performance impact is minimal. Trace can be
@@ -1034,6 +1078,7 @@ config SCSI_IPR_TRACE
config SCSI_IPR_DUMP
	bool "enable adapter dump support"
	depends on SCSI_IPR
	default y
	help
	  If you say Y here, the driver will support adapter crash dump.
	  If you enable this support, the iprdump daemon can be used
@@ -1734,6 +1779,16 @@ config ZFCP
	  called zfcp. If you want to compile it as a module, say M here
	  and read <file:Documentation/modules.txt>.
config SCSI_SRP
tristate "SCSI RDMA Protocol helper library"
depends on SCSI && PCI
select SCSI_TGT
help
If you wish to use SRP target drivers, say Y.
To compile this driver as a module, choose M here: the
module will be called libsrp.
endmenu

source "drivers/scsi/pcmcia/Kconfig"
......
@@ -21,6 +21,7 @@ CFLAGS_seagate.o = -DARBITRATE -DPARITY -DSEAGATE_USE_ASM
subdir-$(CONFIG_PCMCIA)		+= pcmcia

obj-$(CONFIG_SCSI)		+= scsi_mod.o
obj-$(CONFIG_SCSI_TGT)		+= scsi_tgt.o

obj-$(CONFIG_RAID_ATTRS)	+= raid_class.o

@@ -125,7 +126,9 @@ obj-$(CONFIG_SCSI_FCAL)	+= fcal.o
obj-$(CONFIG_SCSI_LASI700)	+= 53c700.o lasi700.o
obj-$(CONFIG_SCSI_NSP32)	+= nsp32.o
obj-$(CONFIG_SCSI_IPR)		+= ipr.o
obj-$(CONFIG_SCSI_SRP)		+= libsrp.o
obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsi/
obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvscsi/
obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
obj-$(CONFIG_SCSI_STEX)		+= stex.o

@@ -141,6 +144,8 @@ obj-$(CONFIG_CHR_DEV_SCH)	+= ch.o
# This goes last, so that "real" scsi devices probe earlier
obj-$(CONFIG_SCSI_DEBUG)	+= scsi_debug.o
obj-$(CONFIG_SCSI)		+= scsi_wait_scan.o

scsi_mod-y			+= scsi.o hosts.o scsi_ioctl.o constants.o \
				   scsicam.o scsi_error.o scsi_lib.o \
				   scsi_scan.o scsi_sysfs.o \

@@ -149,6 +154,8 @@ scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
scsi_mod-$(CONFIG_SYSCTL)	+= scsi_sysctl.o
scsi_mod-$(CONFIG_SCSI_PROC_FS)	+= scsi_proc.o
scsi_tgt-y			+= scsi_tgt_lib.o scsi_tgt_if.o

sd_mod-objs	:= sd.o
sr_mod-objs	:= sr.o sr_ioctl.o sr_vendor.o
ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
......
@@ -220,9 +220,11 @@ static void *addresses[] = {
static unsigned short ports[] = { 0x230, 0x330, 0x280, 0x290, 0x330, 0x340, 0x300, 0x310, 0x348, 0x350 };
#define PORT_COUNT ARRAY_SIZE(ports)

#ifndef MODULE
/* possible interrupt channels */
static unsigned short intrs[] = { 10, 11, 12, 15 };
#define INTR_COUNT ARRAY_SIZE(intrs)
#endif /* !MODULE */

/* signatures for NCR 53c406a based controllers */
#if USE_BIOS

@@ -605,6 +607,7 @@ static int NCR53c406a_release(struct Scsi_Host *shost)
	return 0;
}

#ifndef MODULE
/* called from init/main.c */
static int __init NCR53c406a_setup(char *str)
{

@@ -661,6 +664,8 @@ static int __init NCR53c406a_setup(char *str)
__setup("ncr53c406a=", NCR53c406a_setup);
#endif /* !MODULE */

static const char *NCR53c406a_info(struct Scsi_Host *SChost)
{
	DEB(printk("NCR53c406a_info called\n"));
......
@@ -11,8 +11,8 @@
 *----------------------------------------------------------------------------*/

#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 2409
-# define AAC_DRIVER_BRANCH "-mh2"
# define AAC_DRIVER_BUILD 2423
# define AAC_DRIVER_BRANCH "-mh3"
#endif
#define MAXIMUM_NUM_CONTAINERS	32
......
@@ -518,6 +518,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		 */
		unsigned long count = 36000000L; /* 3 minutes */
		while (down_trylock(&fibptr->event_wait)) {
			int blink;
			if (--count == 0) {
				spin_lock_irqsave(q->lock, qflags);
				q->numpending--;

@@ -530,6 +531,14 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
				}
				return -ETIMEDOUT;
			}
if ((blink = aac_adapter_check_health(dev)) > 0) {
if (wait == -1) {
printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
"Usually a result of a serious unrecoverable hardware problem\n",
blink);
}
return -EFAULT;
}
			udelay(5);
		}
	} else if (down_interruptible(&fibptr->event_wait)) {
@@ -1093,6 +1102,20 @@ static int _aac_reset_adapter(struct aac_dev *aac)
		goto out;
	}
/*
* Loop through the fibs, close the synchronous FIBS
*/
for (index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
struct fib *fib = &aac->fibs[index];
if (!(fib->hw_fib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
(fib->hw_fib->header.XferState & cpu_to_le32(ResponseExpected))) {
unsigned long flagv;
spin_lock_irqsave(&fib->event_lock, flagv);
up(&fib->event_wait);
spin_unlock_irqrestore(&fib->event_lock, flagv);
schedule();
}
}
	index = aac->cardtype;

	/*
......
@@ -586,7 +586,7 @@ static struct scsi_host_template aha1740_template = {
static int aha1740_probe (struct device *dev)
{
-	int slotbase;
	int slotbase, rc;
	unsigned int irq_level, irq_type, translation;
	struct Scsi_Host *shpnt;
	struct aha1740_hostdata *host;
@@ -641,10 +641,16 @@ static int aha1740_probe (struct device *dev)
	}
	eisa_set_drvdata (edev, shpnt);
-	scsi_add_host (shpnt, dev); /* XXX handle failure */
	rc = scsi_add_host (shpnt, dev);
	if (rc)
		goto err_irq;
	scsi_scan_host (shpnt);
	return 0;

 err_irq:
	free_irq(irq_level, shpnt);
 err_unmap:
	dma_unmap_single (&edev->dev, host->ecb_dma_addr,
			  sizeof (host->ecb), DMA_BIDIRECTIONAL);
......
@@ -62,6 +62,7 @@ static struct pci_device_id ahd_linux_pci_id_table[] = {
	/* aic7901 based controllers */
	ID(ID_AHA_29320A),
	ID(ID_AHA_29320ALP),
	ID(ID_AHA_29320LPE),
	/* aic7902 based controllers */
	ID(ID_AHA_29320),
	ID(ID_AHA_29320B),
......
@@ -109,7 +109,13 @@ static struct ahd_pci_identity ahd_pci_ident_table [] =
	{
		ID_AHA_29320ALP,
		ID_ALL_MASK,
-		"Adaptec 29320ALP Ultra320 SCSI adapter",
		"Adaptec 29320ALP PCIx Ultra320 SCSI adapter",
ahd_aic7901_setup
},
{
ID_AHA_29320LPE,
ID_ALL_MASK,
"Adaptec 29320LPE PCIe Ultra320 SCSI adapter",
		ahd_aic7901_setup
	},
	/* aic7901A based controllers */
......
@@ -51,6 +51,7 @@
#define ID_AIC7901			0x800F9005FFFF9005ull
#define ID_AHA_29320A			0x8000900500609005ull
#define ID_AHA_29320ALP			0x8017900500449005ull
#define ID_AHA_29320LPE 		0x8017900500459005ull
#define ID_AIC7901A			0x801E9005FFFF9005ull
#define ID_AHA_29320LP			0x8014900500449005ull
......
@@ -724,6 +724,15 @@ static void asd_free_queues(struct asd_ha_struct *asd_ha)
	list_for_each_safe(pos, n, &pending) {
		struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list);
/*
* Delete unexpired ascb timers. This may happen if we issue
* a CONTROL PHY scb to an adapter and rmmod before the scb
* times out. Apparently we don't wait for the CONTROL PHY
* to complete, so it doesn't matter if we kill the timer.
*/
del_timer_sync(&ascb->timer);
WARN_ON(ascb->scb->header.opcode != CONTROL_PHY);
		list_del_init(pos);
		ASD_DPRINTK("freeing from pending\n");
		asd_ascb_free(ascb);
......
@@ -25,6 +25,7 @@
 */

#include <linux/pci.h>
#include <scsi/scsi_host.h>

#include "aic94xx.h"
#include "aic94xx_reg.h"
@@ -412,6 +413,39 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id)
	}
}
/* hard reset a phy later */
static void do_phy_reset_later(void *data)
{
struct sas_phy *sas_phy = data;
int error;
ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__,
sas_phy->identify.phy_identifier);
/* Reset device port */
error = sas_phy_reset(sas_phy, 1);
if (error)
ASD_DPRINTK("%s: Hard reset of phy %d failed (%d).\n",
__FUNCTION__, sas_phy->identify.phy_identifier, error);
}
static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost)
{
INIT_WORK(&sas_phy->reset_work, do_phy_reset_later, sas_phy);
queue_work(shost->work_q, &sas_phy->reset_work);
}
/* start up the ABORT TASK tmf... */
static void task_kill_later(struct asd_ascb *ascb)
{
struct asd_ha_struct *asd_ha = ascb->ha;
struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
struct Scsi_Host *shost = sas_ha->core.shost;
struct sas_task *task = ascb->uldd_task;
INIT_WORK(&task->abort_work, (void (*)(void *))sas_task_abort, task);
queue_work(shost->work_q, &task->abort_work);
}
static void escb_tasklet_complete(struct asd_ascb *ascb,
				  struct done_list_struct *dl)
{
@@ -439,6 +473,74 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
			    ascb->scb->header.opcode);
	}
/* Catch these before we mask off the sb_opcode bits */
switch (sb_opcode) {
case REQ_TASK_ABORT: {
struct asd_ascb *a, *b;
u16 tc_abort;
tc_abort = *((u16*)(&dl->status_block[1]));
tc_abort = le16_to_cpu(tc_abort);
ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
__FUNCTION__, dl->status_block[3]);
/* Find the pending task and abort it. */
list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list)
if (a->tc_index == tc_abort) {
task_kill_later(a);
break;
}
goto out;
}
case REQ_DEVICE_RESET: {
struct Scsi_Host *shost = sas_ha->core.shost;
struct sas_phy *dev_phy;
struct asd_ascb *a;
u16 conn_handle;
conn_handle = *((u16*)(&dl->status_block[1]));
conn_handle = le16_to_cpu(conn_handle);
ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__,
dl->status_block[3]);
/* Kill all pending tasks and reset the device */
dev_phy = NULL;
list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
struct sas_task *task;
struct domain_device *dev;
u16 x;
task = a->uldd_task;
if (!task)
continue;
dev = task->dev;
x = (unsigned long)dev->lldd_dev;
if (x == conn_handle) {
dev_phy = dev->port->phy;
task_kill_later(a);
}
}
/* Reset device port */
if (!dev_phy) {
ASD_DPRINTK("%s: No pending commands; can't reset.\n",
__FUNCTION__);
goto out;
}
phy_reset_later(dev_phy, shost);
goto out;
}
case SIGNAL_NCQ_ERROR:
ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __FUNCTION__);
goto out;
case CLEAR_NCQ_ERROR:
ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __FUNCTION__);
goto out;
}
sb_opcode &= ~DL_PHY_MASK; sb_opcode &= ~DL_PHY_MASK;
switch (sb_opcode) { switch (sb_opcode) {
@@ -469,22 +571,6 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
		asd_deform_port(asd_ha, phy);
		sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
		break;
-	case REQ_TASK_ABORT:
-		ASD_DPRINTK("%s: phy%d: REQ_TASK_ABORT\n", __FUNCTION__,
-			    phy_id);
-		break;
-	case REQ_DEVICE_RESET:
-		ASD_DPRINTK("%s: phy%d: REQ_DEVICE_RESET\n", __FUNCTION__,
-			    phy_id);
-		break;
-	case SIGNAL_NCQ_ERROR:
-		ASD_DPRINTK("%s: phy%d: SIGNAL_NCQ_ERROR\n", __FUNCTION__,
-			    phy_id);
-		break;
-	case CLEAR_NCQ_ERROR:
-		ASD_DPRINTK("%s: phy%d: CLEAR_NCQ_ERROR\n", __FUNCTION__,
-			    phy_id);
-		break;
	default:
		ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__,
			    phy_id, sb_opcode);
@@ -504,7 +590,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
		break;
	}
out:
	asd_invalidate_edb(ascb, edb);
}
......
@@ -294,6 +294,7 @@ static struct Scsi_Host *hosts[FD_MAX_HOSTS + 1] = { NULL };
static int user_fifo_count = 0;
static int user_fifo_size = 0;

#ifndef MODULE
static int __init fd_mcs_setup(char *str)
{
	static int done_setup = 0;

@@ -311,6 +312,7 @@ static int __init fd_mcs_setup(char *str)
}

__setup("fd_mcs=", fd_mcs_setup);
#endif /* !MODULE */

static void print_banner(struct Scsi_Host *shpnt)
{
......
@@ -263,6 +263,10 @@ static void scsi_host_dev_release(struct device *dev)
		kthread_stop(shost->ehandler);
	if (shost->work_q)
		destroy_workqueue(shost->work_q);
	if (shost->uspace_req_q) {
		kfree(shost->uspace_req_q->queuedata);
		scsi_free_queue(shost->uspace_req_q);
	}

	scsi_destroy_command_freelist(shost);
	if (shost->bqt)

@@ -301,8 +305,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
	if (!shost)
		return NULL;

-	spin_lock_init(&shost->default_lock);
-	scsi_assign_lock(shost, &shost->default_lock);
	shost->host_lock = &shost->default_lock;
	spin_lock_init(shost->host_lock);
	shost->shost_state = SHOST_CREATED;
	INIT_LIST_HEAD(&shost->__devices);
	INIT_LIST_HEAD(&shost->__targets);
......
@@ -3,3 +3,5 @@ obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsic.o
ibmvscsic-y			+= ibmvscsi.o
ibmvscsic-$(CONFIG_PPC_ISERIES)	+= iseries_vscsi.o
ibmvscsic-$(CONFIG_PPC_PSERIES)	+= rpa_vscsi.o
obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvstgt.o
This diff has been collapsed.
@@ -170,7 +170,7 @@ static int setup_debug = 0;
static void i91uSCBPost(BYTE * pHcb, BYTE * pScb);

/* PCI Devices supported by this driver */
-static struct pci_device_id i91u_pci_devices[] __devinitdata = {
static struct pci_device_id i91u_pci_devices[] = {
	{ PCI_VENDOR_ID_INIT, I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_INIT, I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_INIT, I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
......
@@ -79,7 +79,6 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_transport.h>
#include "ipr.h"

/*
@@ -98,7 +97,7 @@ static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
-	{ /* Gemstone, Citrine, and Obsidian */
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
@@ -135,6 +134,7 @@ static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};
@@ -1249,19 +1249,23 @@ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 *
 * Return value:
 * 	none
 **/
-static void ipr_log_hex_data(u32 *data, int len)
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
@@ -1290,7 +1294,7 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_ext_vpd(&error->vpd);
-	ipr_log_hex_data(error->data,
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
@@ -1315,12 +1319,225 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
-	ipr_log_hex_data(error->data,
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
static const struct {
u8 active;
char *desc;
} path_active_desc[] = {
{ IPR_PATH_NO_INFO, "Path" },
{ IPR_PATH_ACTIVE, "Active path" },
{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};
static const struct {
u8 state;
char *desc;
} path_state_desc[] = {
{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
{ IPR_PATH_HEALTHY, "is healthy" },
{ IPR_PATH_DEGRADED, "is degraded" },
{ IPR_PATH_FAILED, "is failed" }
};
/**
* ipr_log_fabric_path - Log a fabric path error
* @hostrcb: hostrcb struct
* @fabric: fabric descriptor
*
* Return value:
* none
**/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
struct ipr_hostrcb_fabric_desc *fabric)
{
int i, j;
u8 path_state = fabric->path_state;
u8 active = path_state & IPR_PATH_ACTIVE_MASK;
u8 state = path_state & IPR_PATH_STATE_MASK;
for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
if (path_active_desc[i].active != active)
continue;
for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
if (path_state_desc[j].state != state)
continue;
if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
path_active_desc[i].desc, path_state_desc[j].desc,
fabric->ioa_port);
} else if (fabric->cascaded_expander == 0xff) {
ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
path_active_desc[i].desc, path_state_desc[j].desc,
fabric->ioa_port, fabric->phy);
} else if (fabric->phy == 0xff) {
ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
path_active_desc[i].desc, path_state_desc[j].desc,
fabric->ioa_port, fabric->cascaded_expander);
} else {
ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
path_active_desc[i].desc, path_state_desc[j].desc,
fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
return;
}
}
ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
static const struct {
u8 type;
char *desc;
} path_type_desc[] = {
{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};
static const struct {
u8 status;
char *desc;
} path_status_desc[] = {
{ IPR_PATH_CFG_NO_PROB, "Functional" },
{ IPR_PATH_CFG_DEGRADED, "Degraded" },
{ IPR_PATH_CFG_FAILED, "Failed" },
{ IPR_PATH_CFG_SUSPECT, "Suspect" },
{ IPR_PATH_NOT_DETECTED, "Missing" },
{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};
static const char *link_rate[] = {
"unknown",
"disabled",
"phy reset problem",
"spinup hold",
"port selector",
"unknown",
"unknown",
"unknown",
"1.5Gbps",
"3.0Gbps",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown"
};
/**
* ipr_log_path_elem - Log a fabric path element.
* @hostrcb: hostrcb struct
* @cfg: fabric path element struct
*
* Return value:
* none
**/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
struct ipr_hostrcb_config_element *cfg)
{
int i, j;
u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
if (type == IPR_PATH_CFG_NOT_EXIST)
return;
for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
if (path_type_desc[i].type != type)
continue;
for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
if (path_status_desc[j].status != status)
continue;
if (type == IPR_PATH_CFG_IOA_PORT) {
ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
path_status_desc[j].desc, path_type_desc[i].desc,
cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
} else {
if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
path_status_desc[j].desc, path_type_desc[i].desc,
link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
} else if (cfg->cascaded_expander == 0xff) {
ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
"WWN=%08X%08X\n", path_status_desc[j].desc,
path_type_desc[i].desc, cfg->phy,
link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
} else if (cfg->phy == 0xff) {
ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
"WWN=%08X%08X\n", path_status_desc[j].desc,
path_type_desc[i].desc, cfg->cascaded_expander,
link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
} else {
ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
"WWN=%08X%08X\n", path_status_desc[j].desc,
path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
}
return;
}
}
ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
"WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
* ipr_log_fabric_error - Log a fabric error.
* @ioa_cfg: ioa config struct
* @hostrcb: hostrcb struct
*
* Return value:
* none
**/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_hostrcb *hostrcb)
{
struct ipr_hostrcb_type_20_error *error;
struct ipr_hostrcb_fabric_desc *fabric;
struct ipr_hostrcb_config_element *cfg;
int i, add_len;
error = &hostrcb->hcam.u.error.u.type_20_error;
error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
add_len = be32_to_cpu(hostrcb->hcam.length) -
(offsetof(struct ipr_hostrcb_error, u) +
offsetof(struct ipr_hostrcb_type_20_error, desc));
for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
ipr_log_fabric_path(hostrcb, fabric);
for_each_fabric_cfg(fabric, cfg)
ipr_log_path_elem(hostrcb, cfg);
add_len -= be16_to_cpu(fabric->length);
fabric = (struct ipr_hostrcb_fabric_desc *)
((unsigned long)fabric + be16_to_cpu(fabric->length));
}
ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
}
/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
@@ -1332,7 +1549,7 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
-	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}
@@ -1394,13 +1611,7 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
	if (!ipr_error_table[error_index].log_hcam)
		return;

-	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
-		ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
-			   "%s\n", ipr_error_table[error_index].error);
-	} else {
-		dev_err(&ioa_cfg->pdev->dev, "%s\n",
-			ipr_error_table[error_index].error);
-	}
	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;
@@ -1437,6 +1648,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
case IPR_HOST_RCB_OVERLAY_ID_20:
ipr_log_fabric_error(ioa_cfg, hostrcb);
break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
@@ -2969,7 +3183,6 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

-	ENTER;
	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
@@ -2996,7 +3209,6 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-	LEAVE;
	return 0;
}
@@ -3573,6 +3785,12 @@ static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes)
	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
while(ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
}
	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
@@ -3636,6 +3854,10 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
}
		}
	}
@@ -3770,7 +3992,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
	 */
	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
		return FAILED;
-	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
	if (!res || !ipr_is_gscsi(res))
		return FAILED;

	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
@@ -4615,7 +4837,7 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
 * Return value:
 * 	0 on success / other on failure
 **/
-int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;
@@ -4648,40 +4870,6 @@ static const char * ipr_ioa_info(struct Scsi_Host *host)
	return buffer;
}
-/**
- * ipr_scsi_timed_out - Handle scsi command timeout
- * @scsi_cmd:	scsi command struct
- *
- * Return value:
- * 	EH_NOT_HANDLED
- **/
-enum scsi_eh_timer_return ipr_scsi_timed_out(struct scsi_cmnd *scsi_cmd)
-{
-	struct ipr_ioa_cfg *ioa_cfg;
-	struct ipr_cmnd *ipr_cmd;
-	unsigned long flags;
-
-	ENTER;
-	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
-	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
-
-	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
-		if (ipr_cmd->qc && ipr_cmd->qc->scsicmd == scsi_cmd) {
-			ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
-			ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
-	LEAVE;
-	return EH_NOT_HANDLED;
-}
-
-static struct scsi_transport_template ipr_transport_template = {
-	.eh_timed_out = ipr_scsi_timed_out
-};
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
@@ -4776,6 +4964,12 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
	unsigned long flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
while(ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}
list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
if (ipr_cmd->qc == qc) { if (ipr_cmd->qc == qc) {
ipr_device_reset(ioa_cfg, sata_port->res); ipr_device_reset(ioa_cfg, sata_port->res);
@@ -6832,6 +7026,7 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}
@@ -7017,7 +7212,6 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
-	host->transportt = &ipr_transport_template;
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
		      sata_port_info.flags, &ipr_sata_ops);
@@ -7351,12 +7545,24 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
@@ -7366,6 +7572,9 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F,
0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
......
@@ -37,8 +37,8 @@
/*
 * Literals
 */
-#define IPR_DRIVER_VERSION "2.2.0"
-#define IPR_DRIVER_DATE "(September 25, 2006)"
#define IPR_DRIVER_VERSION "2.3.0"
#define IPR_DRIVER_DATE "(November 8, 2006)"
/*
 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding

@@ -54,6 +54,8 @@
 */
#define IPR_NUM_BASE_CMD_BLKS		100

#define PCI_DEVICE_ID_IBM_OBSIDIAN_E	0x0339

#define IPR_SUBS_DEV_ID_2780	0x0264
#define IPR_SUBS_DEV_ID_5702	0x0266
#define IPR_SUBS_DEV_ID_5703	0x0278
@@ -66,7 +68,11 @@
#define IPR_SUBS_DEV_ID_571F	0x02D5
#define IPR_SUBS_DEV_ID_572A	0x02C1
#define IPR_SUBS_DEV_ID_572B	0x02C2
#define IPR_SUBS_DEV_ID_572F	0x02C3
#define IPR_SUBS_DEV_ID_575B	0x030D
#define IPR_SUBS_DEV_ID_575C	0x0338
#define IPR_SUBS_DEV_ID_57B7	0x0360
#define IPR_SUBS_DEV_ID_57B8	0x02C2

#define IPR_NAME		"ipr"
@@ -98,6 +104,7 @@
#define IPR_IOASC_IOA_WAS_RESET			0x10000001
#define IPR_IOASC_PCI_ACCESS_ERROR		0x10000002

#define IPR_DEFAULT_MAX_ERROR_DUMP		984
#define IPR_NUM_LOG_HCAMS			2
#define IPR_NUM_CFG_CHG_HCAMS			2
#define IPR_NUM_HCAMS	(IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
@@ -731,6 +738,64 @@ struct ipr_hostrcb_type_17_error {
	u32 data[476];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_config_element {
u8 type_status;
#define IPR_PATH_CFG_TYPE_MASK 0xF0
#define IPR_PATH_CFG_NOT_EXIST 0x00
#define IPR_PATH_CFG_IOA_PORT 0x10
#define IPR_PATH_CFG_EXP_PORT 0x20
#define IPR_PATH_CFG_DEVICE_PORT 0x30
#define IPR_PATH_CFG_DEVICE_LUN 0x40
#define IPR_PATH_CFG_STATUS_MASK 0x0F
#define IPR_PATH_CFG_NO_PROB 0x00
#define IPR_PATH_CFG_DEGRADED 0x01
#define IPR_PATH_CFG_FAILED 0x02
#define IPR_PATH_CFG_SUSPECT 0x03
#define IPR_PATH_NOT_DETECTED 0x04
#define IPR_PATH_INCORRECT_CONN 0x05
u8 cascaded_expander;
u8 phy;
u8 link_rate;
#define IPR_PHY_LINK_RATE_MASK 0x0F
__be32 wwid[2];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_fabric_desc {
__be16 length;
u8 ioa_port;
u8 cascaded_expander;
u8 phy;
u8 path_state;
#define IPR_PATH_ACTIVE_MASK 0xC0
#define IPR_PATH_NO_INFO 0x00
#define IPR_PATH_ACTIVE 0x40
#define IPR_PATH_NOT_ACTIVE 0x80
#define IPR_PATH_STATE_MASK 0x0F
#define IPR_PATH_STATE_NO_INFO 0x00
#define IPR_PATH_HEALTHY 0x01
#define IPR_PATH_DEGRADED 0x02
#define IPR_PATH_FAILED 0x03
__be16 num_entries;
struct ipr_hostrcb_config_element elem[1];
}__attribute__((packed, aligned (4)));
#define for_each_fabric_cfg(fabric, cfg) \
for (cfg = (fabric)->elem; \
cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
cfg++)
struct ipr_hostrcb_type_20_error {
u8 failure_reason[64];
u8 reserved[3];
u8 num_entries;
struct ipr_hostrcb_fabric_desc desc[1];
}__attribute__((packed, aligned (4)));
struct ipr_hostrcb_error {
	__be32 failing_dev_ioasc;
	struct ipr_res_addr failing_dev_res_addr;
@@ -747,6 +812,7 @@ struct ipr_hostrcb_error {
		struct ipr_hostrcb_type_13_error type_13_error;
		struct ipr_hostrcb_type_14_error type_14_error;
		struct ipr_hostrcb_type_17_error type_17_error;
		struct ipr_hostrcb_type_20_error type_20_error;
	} u;
}__attribute__((packed, aligned (4)));
@@ -786,6 +852,7 @@ struct ipr_hcam {
#define IPR_HOST_RCB_OVERLAY_ID_14				0x14
#define IPR_HOST_RCB_OVERLAY_ID_16				0x16
#define IPR_HOST_RCB_OVERLAY_ID_17				0x17
#define IPR_HOST_RCB_OVERLAY_ID_20				0x20
#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT			0xFF

	u8 reserved1[3];
@@ -805,6 +872,7 @@ struct ipr_hostrcb {
	struct ipr_hcam hcam;
	dma_addr_t hostrcb_dma;
	struct list_head queue;
	struct ipr_ioa_cfg *ioa_cfg;
};

/* IPR smart dump table structures */
@@ -1283,6 +1351,17 @@ struct ipr_ucode_image_header {
	} \
}
#define ipr_hcam_err(hostrcb, fmt, ...) \
{ \
if (ipr_is_device(&(hostrcb)->hcam.u.error.failing_dev_res_addr)) { \
ipr_ra_err((hostrcb)->ioa_cfg, \
(hostrcb)->hcam.u.error.failing_dev_res_addr, \
fmt, ##__VA_ARGS__); \
} else { \
dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__); \
} \
}
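ipr_hcam_err() relies on the ioa_cfg back-pointer added to struct ipr_hostrcb in the hunk above: if the failing resource address names a real device it logs against that device via ipr_ra_err(), otherwise it falls back to the adapter's PCI device. A hypothetical call site, purely for illustration:

	ipr_hcam_err(hostrcb, "Fabric path error detected\n");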
#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\ #define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
__FILE__, __FUNCTION__, __LINE__) __FILE__, __FUNCTION__, __LINE__)
......
...@@ -5001,7 +5001,7 @@ ips_init_copperhead(ips_ha_t * ha) ...@@ -5001,7 +5001,7 @@ ips_init_copperhead(ips_ha_t * ha)
break; break;
/* Delay for 1 Second */ /* Delay for 1 Second */
msleep(IPS_ONE_SEC); MDELAY(IPS_ONE_SEC);
} }
if (j >= 45) if (j >= 45)
...@@ -5027,7 +5027,7 @@ ips_init_copperhead(ips_ha_t * ha) ...@@ -5027,7 +5027,7 @@ ips_init_copperhead(ips_ha_t * ha)
break; break;
/* Delay for 1 Second */ /* Delay for 1 Second */
msleep(IPS_ONE_SEC); MDELAY(IPS_ONE_SEC);
} }
if (j >= 240) if (j >= 240)
...@@ -5045,7 +5045,7 @@ ips_init_copperhead(ips_ha_t * ha) ...@@ -5045,7 +5045,7 @@ ips_init_copperhead(ips_ha_t * ha)
break; break;
/* Delay for 1 Second */ /* Delay for 1 Second */
msleep(IPS_ONE_SEC); MDELAY(IPS_ONE_SEC);
} }
if (i >= 240) if (i >= 240)
...@@ -5095,7 +5095,7 @@ ips_init_copperhead_memio(ips_ha_t * ha) ...@@ -5095,7 +5095,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
break; break;
/* Delay for 1 Second */ /* Delay for 1 Second */
msleep(IPS_ONE_SEC); MDELAY(IPS_ONE_SEC);
} }
if (j >= 45) if (j >= 45)
...@@ -5121,7 +5121,7 @@ ips_init_copperhead_memio(ips_ha_t * ha) ...@@ -5121,7 +5121,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
break; break;
/* Delay for 1 Second */ /* Delay for 1 Second */
msleep(IPS_ONE_SEC); MDELAY(IPS_ONE_SEC);
} }
if (j >= 240) if (j >= 240)
...@@ -5139,7 +5139,7 @@ ips_init_copperhead_memio(ips_ha_t * ha) ...@@ -5139,7 +5139,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
break; break;
/* Delay for 1 Second */ /* Delay for 1 Second */
msleep(IPS_ONE_SEC); MDELAY(IPS_ONE_SEC);
} }
if (i >= 240) if (i >= 240)
...@@ -5191,7 +5191,7 @@ ips_init_morpheus(ips_ha_t * ha) ...@@ -5191,7 +5191,7 @@ ips_init_morpheus(ips_ha_t * ha)
break; break;
/* Delay for 1 Second */ /* Delay for 1 Second */
msleep(IPS_ONE_SEC); MDELAY(IPS_ONE_SEC);
} }
if (i >= 45) { if (i >= 45) {
...@@ -5217,7 +5217,7 @@ ips_init_morpheus(ips_ha_t * ha) ...@@ -5217,7 +5217,7 @@ ips_init_morpheus(ips_ha_t * ha)
if (Post != 0x4F00) if (Post != 0x4F00)
break; break;
/* Delay for 1 Second */ /* Delay for 1 Second */
msleep(IPS_ONE_SEC); MDELAY(IPS_ONE_SEC);
} }
if (i >= 120) { if (i >= 120) {
...@@ -5247,7 +5247,7 @@ ips_init_morpheus(ips_ha_t * ha) ...@@ -5247,7 +5247,7 @@ ips_init_morpheus(ips_ha_t * ha)
break; break;
/* Delay for 1 Second */ /* Delay for 1 Second */
msleep(IPS_ONE_SEC); MDELAY(IPS_ONE_SEC);
} }
if (i >= 240) { if (i >= 240) {
...@@ -5307,12 +5307,12 @@ ips_reset_copperhead(ips_ha_t * ha) ...@@ -5307,12 +5307,12 @@ ips_reset_copperhead(ips_ha_t * ha)
outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR); outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR);
/* Delay for 1 Second */ /* Delay for 1 Second */
msleep(IPS_ONE_SEC); MDELAY(IPS_ONE_SEC);
outb(0, ha->io_addr + IPS_REG_SCPR); outb(0, ha->io_addr + IPS_REG_SCPR);
/* Delay for 1 Second */ /* Delay for 1 Second */
msleep(IPS_ONE_SEC); MDELAY(IPS_ONE_SEC);
if ((*ha->func.init) (ha)) if ((*ha->func.init) (ha))
break; break;
...@@ -5352,12 +5352,12 @@ ips_reset_copperhead_memio(ips_ha_t * ha) ...@@ -5352,12 +5352,12 @@ ips_reset_copperhead_memio(ips_ha_t * ha)
writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR); writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR);
/* Delay for 1 Second */ /* Delay for 1 Second */
msleep(IPS_ONE_SEC); MDELAY(IPS_ONE_SEC);
writeb(0, ha->mem_ptr + IPS_REG_SCPR); writeb(0, ha->mem_ptr + IPS_REG_SCPR);
/* Delay for 1 Second */ /* Delay for 1 Second */
msleep(IPS_ONE_SEC); MDELAY(IPS_ONE_SEC);
if ((*ha->func.init) (ha)) if ((*ha->func.init) (ha))
break; break;
...@@ -5398,7 +5398,7 @@ ips_reset_morpheus(ips_ha_t * ha) ...@@ -5398,7 +5398,7 @@ ips_reset_morpheus(ips_ha_t * ha)
writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR); writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR);
/* Delay for 5 Seconds */ /* Delay for 5 Seconds */
msleep(5 * IPS_ONE_SEC); MDELAY(5 * IPS_ONE_SEC);
/* Do a PCI config read to wait for adapter */ /* Do a PCI config read to wait for adapter */
pci_read_config_byte(ha->pcidev, 4, &junk); pci_read_config_byte(ha->pcidev, 4, &junk);
......
...@@ -51,6 +51,7 @@ ...@@ -51,6 +51,7 @@
#define _IPS_H_ #define _IPS_H_
#include <linux/version.h> #include <linux/version.h>
#include <linux/nmi.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/io.h> #include <asm/io.h>
...@@ -116,9 +117,11 @@ ...@@ -116,9 +117,11 @@
dev_printk(level , &((pcidev)->dev) , format , ## arg) dev_printk(level , &((pcidev)->dev) , format , ## arg)
#endif #endif
#ifndef MDELAY #define MDELAY(n) \
#define MDELAY mdelay do { \
#endif mdelay(n); \
touch_nmi_watchdog(); \
} while (0)
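The switch from msleep() back to a busy wait suggests these init/reset paths can run where sleeping is not safe; since each wait is a full second and some loops run for minutes, MDELAY() also touches the NMI watchdog so the long spin is not reported as a lockup. A minimal sketch of the resulting pattern -- the status check is hypothetical, the real loops poll an adapter status register before the break shown in the ips.c hunks above:

	int j;

	for (j = 0; j < 45; j++) {
		if (adapter_post_complete(ha))	/* hypothetical status check */
			break;
		/* Delay for 1 Second */
		MDELAY(IPS_ONE_SEC);		/* mdelay() + touch_nmi_watchdog() */
	}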
#ifndef min #ifndef min
#define min(x,y) ((x) < (y) ? x : y) #define min(x,y) ((x) < (y) ? x : y)
......
...@@ -597,10 +597,15 @@ static struct domain_device *sas_ex_discover_end_dev( ...@@ -597,10 +597,15 @@ static struct domain_device *sas_ex_discover_end_dev(
child->iproto = phy->attached_iproto; child->iproto = phy->attached_iproto;
memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
sas_hash_addr(child->hashed_sas_addr, child->sas_addr); sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); if (!phy->port) {
BUG_ON(!phy->port); phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
/* FIXME: better error handling*/ if (unlikely(!phy->port))
BUG_ON(sas_port_add(phy->port) != 0); goto out_err;
if (unlikely(sas_port_add(phy->port) != 0)) {
sas_port_free(phy->port);
goto out_err;
}
}
sas_ex_get_linkrate(parent, child, phy); sas_ex_get_linkrate(parent, child, phy);
if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) { if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) {
...@@ -615,8 +620,7 @@ static struct domain_device *sas_ex_discover_end_dev( ...@@ -615,8 +620,7 @@ static struct domain_device *sas_ex_discover_end_dev(
SAS_DPRINTK("report phy sata to %016llx:0x%x returned " SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
"0x%x\n", SAS_ADDR(parent->sas_addr), "0x%x\n", SAS_ADDR(parent->sas_addr),
phy_id, res); phy_id, res);
kfree(child); goto out_free;
return NULL;
} }
memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis, memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
sizeof(struct dev_to_host_fis)); sizeof(struct dev_to_host_fis));
...@@ -627,14 +631,14 @@ static struct domain_device *sas_ex_discover_end_dev( ...@@ -627,14 +631,14 @@ static struct domain_device *sas_ex_discover_end_dev(
"%016llx:0x%x returned 0x%x\n", "%016llx:0x%x returned 0x%x\n",
SAS_ADDR(child->sas_addr), SAS_ADDR(child->sas_addr),
SAS_ADDR(parent->sas_addr), phy_id, res); SAS_ADDR(parent->sas_addr), phy_id, res);
kfree(child); goto out_free;
return NULL;
} }
} else if (phy->attached_tproto & SAS_PROTO_SSP) { } else if (phy->attached_tproto & SAS_PROTO_SSP) {
child->dev_type = SAS_END_DEV; child->dev_type = SAS_END_DEV;
rphy = sas_end_device_alloc(phy->port); rphy = sas_end_device_alloc(phy->port);
/* FIXME: error handling */ /* FIXME: error handling */
BUG_ON(!rphy); if (unlikely(!rphy))
goto out_free;
child->tproto = phy->attached_tproto; child->tproto = phy->attached_tproto;
sas_init_dev(child); sas_init_dev(child);
...@@ -651,9 +655,7 @@ static struct domain_device *sas_ex_discover_end_dev( ...@@ -651,9 +655,7 @@ static struct domain_device *sas_ex_discover_end_dev(
"at %016llx:0x%x returned 0x%x\n", "at %016llx:0x%x returned 0x%x\n",
SAS_ADDR(child->sas_addr), SAS_ADDR(child->sas_addr),
SAS_ADDR(parent->sas_addr), phy_id, res); SAS_ADDR(parent->sas_addr), phy_id, res);
/* FIXME: this kfrees list elements without removing them */ goto out_list_del;
//kfree(child);
return NULL;
} }
} else { } else {
SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n", SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
...@@ -663,6 +665,16 @@ static struct domain_device *sas_ex_discover_end_dev( ...@@ -663,6 +665,16 @@ static struct domain_device *sas_ex_discover_end_dev(
list_add_tail(&child->siblings, &parent_ex->children); list_add_tail(&child->siblings, &parent_ex->children);
return child; return child;
out_list_del:
list_del(&child->dev_list_node);
sas_rphy_free(rphy);
out_free:
sas_port_delete(phy->port);
out_err:
phy->port = NULL;
kfree(child);
return NULL;
} }
static struct domain_device *sas_ex_discover_expander( static struct domain_device *sas_ex_discover_expander(
......
...@@ -112,6 +112,8 @@ int sas_register_ha(struct sas_ha_struct *sas_ha) ...@@ -112,6 +112,8 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
} }
} }
INIT_LIST_HEAD(&sas_ha->eh_done_q);
return 0; return 0;
Undo_ports: Undo_ports:
...@@ -142,7 +144,7 @@ static int sas_get_linkerrors(struct sas_phy *phy) ...@@ -142,7 +144,7 @@ static int sas_get_linkerrors(struct sas_phy *phy)
return sas_smp_get_phy_events(phy); return sas_smp_get_phy_events(phy);
} }
static int sas_phy_reset(struct sas_phy *phy, int hard_reset) int sas_phy_reset(struct sas_phy *phy, int hard_reset)
{ {
int ret; int ret;
enum phy_func reset_type; enum phy_func reset_type;
......
...@@ -29,9 +29,11 @@ ...@@ -29,9 +29,11 @@
#include <scsi/scsi_device.h> #include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h> #include <scsi/scsi_tcq.h>
#include <scsi/scsi.h> #include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h> #include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h> #include <scsi/scsi_transport_sas.h>
#include "../scsi_sas_internal.h" #include "../scsi_sas_internal.h"
#include "../scsi_transport_api.h"
#include <linux/err.h> #include <linux/err.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
...@@ -46,6 +48,7 @@ static void sas_scsi_task_done(struct sas_task *task) ...@@ -46,6 +48,7 @@ static void sas_scsi_task_done(struct sas_task *task)
{ {
struct task_status_struct *ts = &task->task_status; struct task_status_struct *ts = &task->task_status;
struct scsi_cmnd *sc = task->uldd_task; struct scsi_cmnd *sc = task->uldd_task;
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(sc->device->host);
unsigned ts_flags = task->task_state_flags; unsigned ts_flags = task->task_state_flags;
int hs = 0, stat = 0; int hs = 0, stat = 0;
...@@ -116,7 +119,7 @@ static void sas_scsi_task_done(struct sas_task *task) ...@@ -116,7 +119,7 @@ static void sas_scsi_task_done(struct sas_task *task)
sas_free_task(task); sas_free_task(task);
/* This is very ugly but this is how SCSI Core works. */ /* This is very ugly but this is how SCSI Core works. */
if (ts_flags & SAS_TASK_STATE_ABORTED) if (ts_flags & SAS_TASK_STATE_ABORTED)
scsi_finish_command(sc); scsi_eh_finish_cmd(sc, &sas_ha->eh_done_q);
else else
sc->scsi_done(sc); sc->scsi_done(sc);
} }
...@@ -307,6 +310,15 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task) ...@@ -307,6 +310,15 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
spin_unlock_irqrestore(&core->task_queue_lock, flags); spin_unlock_irqrestore(&core->task_queue_lock, flags);
} }
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("%s: task 0x%p already aborted\n",
__FUNCTION__, task);
return TASK_IS_ABORTED;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
for (i = 0; i < 5; i++) { for (i = 0; i < 5; i++) {
SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task); SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
res = si->dft->lldd_abort_task(task); res = si->dft->lldd_abort_task(task);
...@@ -409,13 +421,16 @@ void sas_scsi_recover_host(struct Scsi_Host *shost) ...@@ -409,13 +421,16 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
SAS_DPRINTK("going over list...\n"); SAS_DPRINTK("going over list...\n");
list_for_each_entry_safe(cmd, n, &error_q, eh_entry) { list_for_each_entry_safe(cmd, n, &error_q, eh_entry) {
struct sas_task *task = TO_SAS_TASK(cmd); struct sas_task *task = TO_SAS_TASK(cmd);
list_del_init(&cmd->eh_entry);
if (!task) {
SAS_DPRINTK("%s: taskless cmd?!\n", __FUNCTION__);
continue;
}
SAS_DPRINTK("trying to find task 0x%p\n", task); SAS_DPRINTK("trying to find task 0x%p\n", task);
list_del_init(&cmd->eh_entry);
res = sas_scsi_find_task(task); res = sas_scsi_find_task(task);
cmd->eh_eflags = 0; cmd->eh_eflags = 0;
shost->host_failed--;
switch (res) { switch (res) {
case TASK_IS_DONE: case TASK_IS_DONE:
...@@ -491,6 +506,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost) ...@@ -491,6 +506,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
} }
} }
out: out:
scsi_eh_flush_done_q(&ha->eh_done_q);
SAS_DPRINTK("--- Exit %s\n", __FUNCTION__); SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
return; return;
clear_q: clear_q:
...@@ -508,12 +524,18 @@ enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) ...@@ -508,12 +524,18 @@ enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
unsigned long flags; unsigned long flags;
if (!task) { if (!task) {
SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", SAS_DPRINTK("command 0x%p, task 0x%p, gone: EH_HANDLED\n",
cmd, task); cmd, task);
return EH_HANDLED; return EH_HANDLED;
} }
spin_lock_irqsave(&task->task_state_lock, flags); spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("command 0x%p, task 0x%p, aborted by initiator: "
"EH_NOT_HANDLED\n", cmd, task);
return EH_NOT_HANDLED;
}
if (task->task_state_flags & SAS_TASK_STATE_DONE) { if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags); spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
...@@ -777,6 +799,64 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha) ...@@ -777,6 +799,64 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
spin_unlock_irqrestore(&core->task_queue_lock, flags); spin_unlock_irqrestore(&core->task_queue_lock, flags);
} }
static int do_sas_task_abort(struct sas_task *task)
{
struct scsi_cmnd *sc = task->uldd_task;
struct sas_internal *si =
to_sas_internal(task->dev->port->ha->core.shost->transportt);
unsigned long flags;
int res;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("%s: Task %p already aborted.\n", __FUNCTION__,
task);
return 0;
}
task->task_state_flags |= SAS_TASK_INITIATOR_ABORTED;
if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (!si->dft->lldd_abort_task)
return -ENODEV;
res = si->dft->lldd_abort_task(task);
if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
(res == TMF_RESP_FUNC_COMPLETE))
{
/* SMP commands don't have scsi_cmds(?) */
if (!sc) {
task->task_done(task);
return 0;
}
scsi_req_abort_cmd(sc);
scsi_schedule_eh(sc->device->host);
return 0;
}
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags &= ~SAS_TASK_INITIATOR_ABORTED;
if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
return -EAGAIN;
}
void sas_task_abort(struct sas_task *task)
{
int i;
for (i = 0; i < 5; i++)
if (!do_sas_task_abort(task))
return;
SAS_DPRINTK("%s: Could not kill task!\n", __FUNCTION__);
}
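With sas_task_abort() exported below, an LLDD that decides a task is dead (for example from its own command timer) can hand it back to libsas: do_sas_task_abort() marks the task initiator-aborted, asks the LLDD to abort it, and then routes the scsi_cmnd into SCSI EH via scsi_req_abort_cmd()/scsi_schedule_eh(). A minimal, hypothetical caller:

static void example_lldd_task_timed_out(struct sas_task *task)
{
	/* retries the abort up to five times internally, logs on failure */
	sas_task_abort(task);
}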
EXPORT_SYMBOL_GPL(sas_queuecommand); EXPORT_SYMBOL_GPL(sas_queuecommand);
EXPORT_SYMBOL_GPL(sas_target_alloc); EXPORT_SYMBOL_GPL(sas_target_alloc);
EXPORT_SYMBOL_GPL(sas_slave_configure); EXPORT_SYMBOL_GPL(sas_slave_configure);
...@@ -784,3 +864,5 @@ EXPORT_SYMBOL_GPL(sas_slave_destroy); ...@@ -784,3 +864,5 @@ EXPORT_SYMBOL_GPL(sas_slave_destroy);
EXPORT_SYMBOL_GPL(sas_change_queue_depth); EXPORT_SYMBOL_GPL(sas_change_queue_depth);
EXPORT_SYMBOL_GPL(sas_change_queue_type); EXPORT_SYMBOL_GPL(sas_change_queue_type);
EXPORT_SYMBOL_GPL(sas_bios_param); EXPORT_SYMBOL_GPL(sas_bios_param);
EXPORT_SYMBOL_GPL(sas_task_abort);
EXPORT_SYMBOL_GPL(sas_phy_reset);
/*
* SCSI RDMA Protocol lib functions

*
* Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/err.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_tgt.h>
#include <scsi/srp.h>
#include <scsi/libsrp.h>
enum srp_task_attributes {
SRP_SIMPLE_TASK = 0,
SRP_HEAD_TASK = 1,
SRP_ORDERED_TASK = 2,
SRP_ACA_TASK = 4
};
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...) \
do { \
printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
} while (0)
/* #define dprintk eprintk */
#define dprintk(fmt, args...)
static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
struct srp_buf **ring)
{
int i;
struct iu_entry *iue;
q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
if (!q->pool)
return -ENOMEM;
q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
if (!q->items)
goto free_pool;
spin_lock_init(&q->lock);
q->queue = kfifo_init((void *) q->pool, max * sizeof(void *),
GFP_KERNEL, &q->lock);
if (IS_ERR(q->queue))
goto free_item;
for (i = 0, iue = q->items; i < max; i++) {
__kfifo_put(q->queue, (void *) &iue, sizeof(void *));
iue->sbuf = ring[i];
iue++;
}
return 0;
free_item:
kfree(q->items);
free_pool:
kfree(q->pool);
return -ENOMEM;
}
static void srp_iu_pool_free(struct srp_queue *q)
{
kfree(q->items);
kfree(q->pool);
}
static struct srp_buf **srp_ring_alloc(struct device *dev,
size_t max, size_t size)
{
int i;
struct srp_buf **ring;
ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
if (!ring)
return NULL;
for (i = 0; i < max; i++) {
ring[i] = kzalloc(sizeof(struct srp_buf), GFP_KERNEL);
if (!ring[i])
goto out;
ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
GFP_KERNEL);
if (!ring[i]->buf)
goto out;
}
return ring;
out:
for (i = 0; i < max && ring[i]; i++) {
if (ring[i]->buf)
dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
kfree(ring[i]);
}
kfree(ring);
return NULL;
}
static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max,
size_t size)
{
int i;
for (i = 0; i < max; i++) {
dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
kfree(ring[i]);
}
}
int srp_target_alloc(struct srp_target *target, struct device *dev,
size_t nr, size_t iu_size)
{
int err;
spin_lock_init(&target->lock);
INIT_LIST_HEAD(&target->cmd_queue);
target->dev = dev;
target->dev->driver_data = target;
target->srp_iu_size = iu_size;
target->rx_ring_size = nr;
target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
if (!target->rx_ring)
return -ENOMEM;
err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
if (err)
goto free_ring;
return 0;
free_ring:
srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(srp_target_alloc);
void srp_target_free(struct srp_target *target)
{
srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
target->srp_iu_size);
srp_iu_pool_free(&target->iu_queue);
}
EXPORT_SYMBOL_GPL(srp_target_free);
struct iu_entry *srp_iu_get(struct srp_target *target)
{
struct iu_entry *iue = NULL;
kfifo_get(target->iu_queue.queue, (void *) &iue, sizeof(void *));
if (!iue)
return iue;
iue->target = target;
INIT_LIST_HEAD(&iue->ilist);
iue->flags = 0;
return iue;
}
EXPORT_SYMBOL_GPL(srp_iu_get);
void srp_iu_put(struct iu_entry *iue)
{
kfifo_put(iue->target->iu_queue.queue, (void *) &iue, sizeof(void *));
}
EXPORT_SYMBOL_GPL(srp_iu_put);
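The IU pool is a kfifo of pointers into a DMA-coherent receive ring: srp_target_alloc() builds the ring and pool, srp_iu_get() hands out one entry per inbound request, and srp_iu_put() returns it once the response is done. A sketch of the lifecycle as a target driver might use it; the sizes and the processing step are illustrative:

static int example_target_setup(struct srp_target *target, struct device *dev)
{
	/* 16 receive IUs of 256 bytes each (illustrative sizes) */
	return srp_target_alloc(target, dev, 16, 256);
}

static void example_handle_one_request(struct srp_target *target)
{
	struct iu_entry *iue = srp_iu_get(target);

	if (!iue)
		return;			/* ring exhausted; apply back-pressure */
	/* ... decode the SRP IU in iue->sbuf->buf and queue the command ... */
	srp_iu_put(iue);		/* after the response has been sent */
}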
static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md,
enum dma_data_direction dir, srp_rdma_t rdma_io,
int dma_map, int ext_desc)
{
struct iu_entry *iue = NULL;
struct scatterlist *sg = NULL;
int err, nsg = 0, len;
if (dma_map) {
iue = (struct iu_entry *) sc->SCp.ptr;
sg = sc->request_buffer;
dprintk("%p %u %u %d\n", iue, sc->request_bufflen,
md->len, sc->use_sg);
nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg,
DMA_BIDIRECTIONAL);
if (!nsg) {
printk("fail to map %p %d\n", iue, sc->use_sg);
return 0;
}
len = min(sc->request_bufflen, md->len);
} else
len = md->len;
err = rdma_io(sc, sg, nsg, md, 1, dir, len);
if (dma_map)
dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
return err;
}
static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
struct srp_indirect_buf *id,
enum dma_data_direction dir, srp_rdma_t rdma_io,
int dma_map, int ext_desc)
{
struct iu_entry *iue = NULL;
struct srp_direct_buf *md = NULL;
struct scatterlist dummy, *sg = NULL;
dma_addr_t token = 0;
long err;
unsigned int done = 0;
int nmd, nsg = 0, len;
if (dma_map || ext_desc) {
iue = (struct iu_entry *) sc->SCp.ptr;
sg = sc->request_buffer;
dprintk("%p %u %u %d %d\n",
iue, sc->request_bufflen, id->len,
cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
}
nmd = id->table_desc.len / sizeof(struct srp_direct_buf);
if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
(dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
md = &id->desc_list[0];
goto rdma;
}
if (ext_desc && dma_map) {
md = dma_alloc_coherent(iue->target->dev, id->table_desc.len,
&token, GFP_KERNEL);
if (!md) {
eprintk("Can't get dma memory %u\n", id->table_desc.len);
return -ENOMEM;
}
sg_init_one(&dummy, md, id->table_desc.len);
sg_dma_address(&dummy) = token;
err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
id->table_desc.len);
if (err < 0) {
eprintk("Error copying indirect table %ld\n", err);
goto free_mem;
}
} else {
eprintk("This command uses external indirect buffer\n");
return -EINVAL;
}
rdma:
if (dma_map) {
nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL);
if (!nsg) {
eprintk("fail to map %p %d\n", iue, sc->use_sg);
goto free_mem;
}
len = min(sc->request_bufflen, id->len);
} else
len = id->len;
err = rdma_io(sc, sg, nsg, md, nmd, dir, len);
if (dma_map)
dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
free_mem:
if (token && dma_map)
dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);
return done;
}
static int data_out_desc_size(struct srp_cmd *cmd)
{
int size = 0;
u8 fmt = cmd->buf_fmt >> 4;
switch (fmt) {
case SRP_NO_DATA_DESC:
break;
case SRP_DATA_DESC_DIRECT:
size = sizeof(struct srp_direct_buf);
break;
case SRP_DATA_DESC_INDIRECT:
size = sizeof(struct srp_indirect_buf) +
sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
break;
default:
eprintk("client error. Invalid data_out_format %x\n", fmt);
break;
}
return size;
}
/*
* TODO: this can be called multiple times for a single command if it
* has very long data.
*/
int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
srp_rdma_t rdma_io, int dma_map, int ext_desc)
{
struct srp_direct_buf *md;
struct srp_indirect_buf *id;
enum dma_data_direction dir;
int offset, err = 0;
u8 format;
offset = cmd->add_cdb_len * 4;
dir = srp_cmd_direction(cmd);
if (dir == DMA_FROM_DEVICE)
offset += data_out_desc_size(cmd);
if (dir == DMA_TO_DEVICE)
format = cmd->buf_fmt >> 4;
else
format = cmd->buf_fmt & ((1U << 4) - 1);
switch (format) {
case SRP_NO_DATA_DESC:
break;
case SRP_DATA_DESC_DIRECT:
md = (struct srp_direct_buf *)
(cmd->add_data + offset);
err = srp_direct_data(sc, md, dir, rdma_io, dma_map, ext_desc);
break;
case SRP_DATA_DESC_INDIRECT:
id = (struct srp_indirect_buf *)
(cmd->add_data + offset);
err = srp_indirect_data(sc, cmd, id, dir, rdma_io, dma_map,
ext_desc);
break;
default:
eprintk("Unknown format %d %x\n", dir, format);
break;
}
return err;
}
EXPORT_SYMBOL_GPL(srp_transfer_data);
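srp_transfer_data() only parses the SRP data descriptors; the actual data movement is delegated to the rdma_io callback, which receives the (possibly DMA-mapped) scatterlist plus the remote direct-buffer list. A sketch of the callback's shape, matching the way it is invoked above; the body is a placeholder for a transport-specific RDMA engine:

static int example_rdma_io(struct scsi_cmnd *sc, struct scatterlist *sg,
			   int nsg, struct srp_direct_buf *md, int nmd,
			   enum dma_data_direction dir, unsigned int len)
{
	/* move up to 'len' bytes between sg[0..nsg) and the remote
	 * buffers md[0..nmd), in direction 'dir' */
	return 0;
}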
static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
{
struct srp_direct_buf *md;
struct srp_indirect_buf *id;
int len = 0, offset = cmd->add_cdb_len * 4;
u8 fmt;
if (dir == DMA_TO_DEVICE)
fmt = cmd->buf_fmt >> 4;
else {
fmt = cmd->buf_fmt & ((1U << 4) - 1);
offset += data_out_desc_size(cmd);
}
switch (fmt) {
case SRP_NO_DATA_DESC:
break;
case SRP_DATA_DESC_DIRECT:
md = (struct srp_direct_buf *) (cmd->add_data + offset);
len = md->len;
break;
case SRP_DATA_DESC_INDIRECT:
id = (struct srp_indirect_buf *) (cmd->add_data + offset);
len = id->len;
break;
default:
eprintk("invalid data format %x\n", fmt);
break;
}
return len;
}
int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info,
u64 addr)
{
enum dma_data_direction dir;
struct scsi_cmnd *sc;
int tag, len, err;
switch (cmd->task_attr) {
case SRP_SIMPLE_TASK:
tag = MSG_SIMPLE_TAG;
break;
case SRP_ORDERED_TASK:
tag = MSG_ORDERED_TAG;
break;
case SRP_HEAD_TASK:
tag = MSG_HEAD_TAG;
break;
default:
eprintk("Task attribute %d not supported\n", cmd->task_attr);
tag = MSG_ORDERED_TAG;
}
dir = srp_cmd_direction(cmd);
len = vscsis_data_length(cmd, dir);
dprintk("%p %x %lx %d %d %d %llx\n", info, cmd->cdb[0],
cmd->lun, dir, len, tag, (unsigned long long) cmd->tag);
sc = scsi_host_get_command(shost, dir, GFP_KERNEL);
if (!sc)
return -ENOMEM;
sc->SCp.ptr = info;
memcpy(sc->cmnd, cmd->cdb, MAX_COMMAND_SIZE);
sc->request_bufflen = len;
sc->request_buffer = (void *) (unsigned long) addr;
sc->tag = tag;
err = scsi_tgt_queue_command(sc, (struct scsi_lun *) &cmd->lun, cmd->tag);
if (err)
scsi_host_put_command(shost, sc);
return err;
}
EXPORT_SYMBOL_GPL(srp_cmd_queue);
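srp_cmd_queue() turns an SRP_CMD IU into a scsi_cmnd and hands it to the tgt core; the 'info' cookie comes back in sc->SCp.ptr and 'addr' is stashed as the request buffer for the later data transfer. A hypothetical receive-path caller -- names beyond those defined above (target->shost, remote_buf) are assumptions:

static int example_handle_srp_cmd(struct srp_target *target,
				  struct iu_entry *iue, u64 remote_buf)
{
	struct srp_cmd *cmd = iue->sbuf->buf;

	if (cmd->opcode != SRP_CMD)
		return -EINVAL;		/* task management handled elsewhere */
	return srp_cmd_queue(target->shost, cmd, iue, remote_buf);
}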
MODULE_DESCRIPTION("SCSI RDAM Protocol lib functions");
MODULE_AUTHOR("FUJITA Tomonori");
MODULE_LICENSE("GPL");
...@@ -296,13 +296,17 @@ struct lpfc_hba { ...@@ -296,13 +296,17 @@ struct lpfc_hba {
uint32_t cfg_cr_delay; uint32_t cfg_cr_delay;
uint32_t cfg_cr_count; uint32_t cfg_cr_count;
uint32_t cfg_multi_ring_support; uint32_t cfg_multi_ring_support;
uint32_t cfg_multi_ring_rctl;
uint32_t cfg_multi_ring_type;
uint32_t cfg_fdmi_on; uint32_t cfg_fdmi_on;
uint32_t cfg_discovery_threads; uint32_t cfg_discovery_threads;
uint32_t cfg_max_luns; uint32_t cfg_max_luns;
uint32_t cfg_poll; uint32_t cfg_poll;
uint32_t cfg_poll_tmo; uint32_t cfg_poll_tmo;
uint32_t cfg_use_msi;
uint32_t cfg_sg_seg_cnt; uint32_t cfg_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size; uint32_t cfg_sg_dma_buf_size;
uint64_t cfg_soft_wwnn;
uint64_t cfg_soft_wwpn; uint64_t cfg_soft_wwpn;
uint32_t dev_loss_tmo_changed; uint32_t dev_loss_tmo_changed;
...@@ -355,7 +359,7 @@ struct lpfc_hba { ...@@ -355,7 +359,7 @@ struct lpfc_hba {
#define VPD_PORT 0x8 /* valid vpd port data */ #define VPD_PORT 0x8 /* valid vpd port data */
#define VPD_MASK 0xf /* mask for any vpd data */ #define VPD_MASK 0xf /* mask for any vpd data */
uint8_t soft_wwpn_enable; uint8_t soft_wwn_enable;
struct timer_list fcp_poll_timer; struct timer_list fcp_poll_timer;
struct timer_list els_tmofunc; struct timer_list els_tmofunc;
......
...@@ -552,10 +552,10 @@ static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, ...@@ -552,10 +552,10 @@ static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
static char *lpfc_soft_wwpn_key = "C99G71SL8032A"; static char *lpfc_soft_wwn_key = "C99G71SL8032A";
static ssize_t static ssize_t
lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf, lpfc_soft_wwn_enable_store(struct class_device *cdev, const char *buf,
size_t count) size_t count)
{ {
struct Scsi_Host *host = class_to_shost(cdev); struct Scsi_Host *host = class_to_shost(cdev);
...@@ -579,15 +579,15 @@ lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf, ...@@ -579,15 +579,15 @@ lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf,
if (buf[cnt-1] == '\n') if (buf[cnt-1] == '\n')
cnt--; cnt--;
if ((cnt != strlen(lpfc_soft_wwpn_key)) || if ((cnt != strlen(lpfc_soft_wwn_key)) ||
(strncmp(buf, lpfc_soft_wwpn_key, strlen(lpfc_soft_wwpn_key)) != 0)) (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
return -EINVAL; return -EINVAL;
phba->soft_wwpn_enable = 1; phba->soft_wwn_enable = 1;
return count; return count;
} }
static CLASS_DEVICE_ATTR(lpfc_soft_wwpn_enable, S_IWUSR, NULL, static CLASS_DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
lpfc_soft_wwpn_enable_store); lpfc_soft_wwn_enable_store);
static ssize_t static ssize_t
lpfc_soft_wwpn_show(struct class_device *cdev, char *buf) lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
...@@ -613,12 +613,12 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count) ...@@ -613,12 +613,12 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
if (buf[cnt-1] == '\n') if (buf[cnt-1] == '\n')
cnt--; cnt--;
if (!phba->soft_wwpn_enable || (cnt < 16) || (cnt > 18) || if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
((cnt == 17) && (*buf++ != 'x')) || ((cnt == 17) && (*buf++ != 'x')) ||
((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
return -EINVAL; return -EINVAL;
phba->soft_wwpn_enable = 0; phba->soft_wwn_enable = 0;
memset(wwpn, 0, sizeof(wwpn)); memset(wwpn, 0, sizeof(wwpn));
...@@ -639,6 +639,8 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count) ...@@ -639,6 +639,8 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
} }
phba->cfg_soft_wwpn = wwn_to_u64(wwpn); phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
fc_host_port_name(host) = phba->cfg_soft_wwpn; fc_host_port_name(host) = phba->cfg_soft_wwpn;
if (phba->cfg_soft_wwnn)
fc_host_node_name(host) = phba->cfg_soft_wwnn;
dev_printk(KERN_NOTICE, &phba->pcidev->dev, dev_printk(KERN_NOTICE, &phba->pcidev->dev,
"lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no); "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
...@@ -664,6 +666,66 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count) ...@@ -664,6 +666,66 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\ static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
lpfc_soft_wwpn_show, lpfc_soft_wwpn_store); lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
static ssize_t
lpfc_soft_wwnn_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)phba->cfg_soft_wwnn);
}
static ssize_t
lpfc_soft_wwnn_store(struct class_device *cdev, const char *buf, size_t count)
{
struct Scsi_Host *host = class_to_shost(cdev);
struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
unsigned int i, j, cnt=count;
u8 wwnn[8];
/* count may include a LF at end of string */
if (buf[cnt-1] == '\n')
cnt--;
if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
((cnt == 17) && (*buf++ != 'x')) ||
((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
return -EINVAL;
/*
* Allow wwnn to be set many times, as long as the enable is set.
* However, once the wwpn is set, everything locks.
*/
memset(wwnn, 0, sizeof(wwnn));
/* Validate and store the new name */
for (i=0, j=0; i < 16; i++) {
if ((*buf >= 'a') && (*buf <= 'f'))
j = ((j << 4) | ((*buf++ -'a') + 10));
else if ((*buf >= 'A') && (*buf <= 'F'))
j = ((j << 4) | ((*buf++ -'A') + 10));
else if ((*buf >= '0') && (*buf <= '9'))
j = ((j << 4) | (*buf++ -'0'));
else
return -EINVAL;
if (i % 2) {
wwnn[i/2] = j & 0xff;
j = 0;
}
}
phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
dev_printk(KERN_NOTICE, &phba->pcidev->dev,
"lpfc%d: soft_wwnn set. Value will take effect upon "
"setting of the soft_wwpn\n", phba->brd_no);
return count;
}
static CLASS_DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
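Taken together with the renamed enable attribute, the administrative sequence implied by these handlers is: write the driver's enable key (lpfc_soft_wwn_key) to lpfc_soft_wwn_enable, optionally write a hex node name (with or without a 0x prefix) to lpfc_soft_wwnn -- it may be rewritten as long as the enable is set -- and finally write the port name to lpfc_soft_wwpn, which clears soft_wwn_enable and reinitializes the link so both soft names take effect.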
static int lpfc_poll = 0; static int lpfc_poll = 0;
module_param(lpfc_poll, int, 0); module_param(lpfc_poll, int, 0);
...@@ -802,12 +864,11 @@ static CLASS_DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR, ...@@ -802,12 +864,11 @@ static CLASS_DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
# LOG_MBOX 0x4 Mailbox events # LOG_MBOX 0x4 Mailbox events
# LOG_INIT 0x8 Initialization events # LOG_INIT 0x8 Initialization events
# LOG_LINK_EVENT 0x10 Link events # LOG_LINK_EVENT 0x10 Link events
# LOG_IP 0x20 IP traffic history
# LOG_FCP 0x40 FCP traffic history # LOG_FCP 0x40 FCP traffic history
# LOG_NODE 0x80 Node table events # LOG_NODE 0x80 Node table events
# LOG_MISC 0x400 Miscellaneous events # LOG_MISC 0x400 Miscellaneous events
# LOG_SLI 0x800 SLI events # LOG_SLI 0x800 SLI events
# LOG_CHK_COND 0x1000 FCP Check condition flag # LOG_FCP_ERROR 0x1000 Only log FCP errors
# LOG_LIBDFC 0x2000 LIBDFC events # LOG_LIBDFC 0x2000 LIBDFC events
# LOG_ALL_MSG 0xffff LOG all messages # LOG_ALL_MSG 0xffff LOG all messages
*/ */
...@@ -915,6 +976,22 @@ LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an " ...@@ -915,6 +976,22 @@ LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary " LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
"SLI rings to spread IOCB entries across"); "SLI rings to spread IOCB entries across");
/*
# lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
# identifies what rctl value to configure the additional ring for.
# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
*/
LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1,
255, "Identifies RCTL for additional ring configuration");
/*
# lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
# identifies what type value to configure the additional ring for.
# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
*/
LPFC_ATTR_R(multi_ring_type, FC_LLC_SNAP, 1,
255, "Identifies TYPE for additional ring configuration");
/* /*
# lpfc_fdmi_on: controls FDMI support. # lpfc_fdmi_on: controls FDMI support.
# 0 = no FDMI support # 0 = no FDMI support
...@@ -946,6 +1023,15 @@ LPFC_ATTR_R(max_luns, 255, 0, 65535, ...@@ -946,6 +1023,15 @@ LPFC_ATTR_R(max_luns, 255, 0, 65535,
LPFC_ATTR_RW(poll_tmo, 10, 1, 255, LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
"Milliseconds driver will wait between polling FCP ring"); "Milliseconds driver will wait between polling FCP ring");
/*
# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
# support this feature
# 0 = MSI disabled (default)
# 1 = MSI enabled
# Value range is [0,1]. Default value is 0.
*/
LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
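A minimal sketch of what the probe side of this option looks like; the lpfc_init.c hunk is not included here, so the fallback handling below is an assumption and only cfg_use_msi plus the generic PCI/IRQ calls are taken as given:

static int example_setup_irq(struct lpfc_hba *phba)
{
	/* assumption: fall back silently to INTx when MSI is unavailable */
	if (phba->cfg_use_msi && !pci_enable_msi(phba->pcidev))
		dev_info(&phba->pcidev->dev, "lpfc: using MSI\n");
	return request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
			   LPFC_DRIVER_NAME, phba);
}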
struct class_device_attribute *lpfc_host_attrs[] = { struct class_device_attribute *lpfc_host_attrs[] = {
&class_device_attr_info, &class_device_attr_info,
...@@ -974,6 +1060,8 @@ struct class_device_attribute *lpfc_host_attrs[] = { ...@@ -974,6 +1060,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
&class_device_attr_lpfc_cr_delay, &class_device_attr_lpfc_cr_delay,
&class_device_attr_lpfc_cr_count, &class_device_attr_lpfc_cr_count,
&class_device_attr_lpfc_multi_ring_support, &class_device_attr_lpfc_multi_ring_support,
&class_device_attr_lpfc_multi_ring_rctl,
&class_device_attr_lpfc_multi_ring_type,
&class_device_attr_lpfc_fdmi_on, &class_device_attr_lpfc_fdmi_on,
&class_device_attr_lpfc_max_luns, &class_device_attr_lpfc_max_luns,
&class_device_attr_nport_evt_cnt, &class_device_attr_nport_evt_cnt,
...@@ -982,8 +1070,10 @@ struct class_device_attribute *lpfc_host_attrs[] = { ...@@ -982,8 +1070,10 @@ struct class_device_attribute *lpfc_host_attrs[] = {
&class_device_attr_issue_reset, &class_device_attr_issue_reset,
&class_device_attr_lpfc_poll, &class_device_attr_lpfc_poll,
&class_device_attr_lpfc_poll_tmo, &class_device_attr_lpfc_poll_tmo,
&class_device_attr_lpfc_use_msi,
&class_device_attr_lpfc_soft_wwnn,
&class_device_attr_lpfc_soft_wwpn, &class_device_attr_lpfc_soft_wwpn,
&class_device_attr_lpfc_soft_wwpn_enable, &class_device_attr_lpfc_soft_wwn_enable,
NULL, NULL,
}; };
...@@ -1771,6 +1861,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) ...@@ -1771,6 +1861,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_cr_delay_init(phba, lpfc_cr_delay); lpfc_cr_delay_init(phba, lpfc_cr_delay);
lpfc_cr_count_init(phba, lpfc_cr_count); lpfc_cr_count_init(phba, lpfc_cr_count);
lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support); lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
lpfc_lun_queue_depth_init(phba, lpfc_lun_queue_depth); lpfc_lun_queue_depth_init(phba, lpfc_lun_queue_depth);
lpfc_fcp_class_init(phba, lpfc_fcp_class); lpfc_fcp_class_init(phba, lpfc_fcp_class);
lpfc_use_adisc_init(phba, lpfc_use_adisc); lpfc_use_adisc_init(phba, lpfc_use_adisc);
...@@ -1782,9 +1874,11 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) ...@@ -1782,9 +1874,11 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_discovery_threads_init(phba, lpfc_discovery_threads); lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
lpfc_max_luns_init(phba, lpfc_max_luns); lpfc_max_luns_init(phba, lpfc_max_luns);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo); lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo); lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo); lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
phba->cfg_poll = lpfc_poll; phba->cfg_poll = lpfc_poll;
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L; phba->cfg_soft_wwpn = 0L;
/* /*
......
...@@ -558,6 +558,14 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, ...@@ -558,6 +558,14 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
return; return;
} }
static void
lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
struct lpfc_iocbq * rspiocb)
{
lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
return;
}
void void
lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp) lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
{ {
...@@ -629,6 +637,8 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) ...@@ -629,6 +637,8 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
bpl->tus.f.bdeSize = RNN_REQUEST_SZ; bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RSNN_NN) else if (cmdcode == SLI_CTNS_RSNN_NN)
bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RFF_ID)
bpl->tus.f.bdeSize = RFF_REQUEST_SZ;
else else
bpl->tus.f.bdeSize = 0; bpl->tus.f.bdeSize = 0;
bpl->tus.w = le32_to_cpu(bpl->tus.w); bpl->tus.w = le32_to_cpu(bpl->tus.w);
...@@ -660,6 +670,17 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) ...@@ -660,6 +670,17 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
cmpl = lpfc_cmpl_ct_cmd_rft_id; cmpl = lpfc_cmpl_ct_cmd_rft_id;
break; break;
case SLI_CTNS_RFF_ID:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RFF_ID);
CtReq->un.rff.PortId = be32_to_cpu(phba->fc_myDID);
CtReq->un.rff.feature_res = 0;
CtReq->un.rff.feature_tgt = 0;
CtReq->un.rff.type_code = FC_FCP_DATA;
CtReq->un.rff.feature_init = 1;
cmpl = lpfc_cmpl_ct_cmd_rff_id;
break;
case SLI_CTNS_RNN_ID: case SLI_CTNS_RNN_ID:
CtReq->CommandResponse.bits.CmdRsp = CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RNN_ID); be16_to_cpu(SLI_CTNS_RNN_ID);
...@@ -934,7 +955,8 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) ...@@ -934,7 +955,8 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION); ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION);
sprintf(ae->un.OsNameVersion, "%s %s %s", sprintf(ae->un.OsNameVersion, "%s %s %s",
init_utsname()->sysname, init_utsname()->release, init_utsname()->sysname,
init_utsname()->release,
init_utsname()->version); init_utsname()->version);
len = strlen(ae->un.OsNameVersion); len = strlen(ae->un.OsNameVersion);
len += (len & 3) ? (4 - (len & 3)) : 4; len += (len & 3) ? (4 - (len & 3)) : 4;
......
...@@ -243,6 +243,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, ...@@ -243,6 +243,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
struct serv_parm *sp, IOCB_t *irsp) struct serv_parm *sp, IOCB_t *irsp)
{ {
LPFC_MBOXQ_t *mbox; LPFC_MBOXQ_t *mbox;
struct lpfc_dmabuf *mp;
int rc; int rc;
spin_lock_irq(phba->host->host_lock); spin_lock_irq(phba->host->host_lock);
...@@ -307,10 +308,14 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, ...@@ -307,10 +308,14 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
if (rc == MBX_NOT_FINISHED) if (rc == MBX_NOT_FINISHED)
goto fail_free_mbox; goto fail_issue_reg_login;
return 0; return 0;
fail_issue_reg_login:
mp = (struct lpfc_dmabuf *) mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
fail_free_mbox: fail_free_mbox:
mempool_free(mbox, phba->mbox_mem_pool); mempool_free(mbox, phba->mbox_mem_pool);
fail: fail:
...@@ -657,6 +662,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp, ...@@ -657,6 +662,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp,
uint8_t name[sizeof (struct lpfc_name)]; uint8_t name[sizeof (struct lpfc_name)];
uint32_t rc; uint32_t rc;
/* Fabric nodes can have the same WWPN so we don't bother searching
* by WWPN. Just return the ndlp that was given to us.
*/
if (ndlp->nlp_type & NLP_FABRIC)
return ndlp;
lp = (uint32_t *) prsp->virt; lp = (uint32_t *) prsp->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
memset(name, 0, sizeof (struct lpfc_name)); memset(name, 0, sizeof (struct lpfc_name));
...@@ -1122,7 +1133,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, ...@@ -1122,7 +1133,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
mempool_free(mbox, mempool_free(mbox,
phba->mbox_mem_pool); phba->mbox_mem_pool);
lpfc_disc_flush_list(phba); lpfc_disc_flush_list(phba);
psli->ring[(psli->ip_ring)]. psli->ring[(psli->extra_ring)].
flag &= flag &=
~LPFC_STOP_IOCB_EVENT; ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->fcp_ring)]. psli->ring[(psli->fcp_ring)].
...@@ -1851,6 +1862,7 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, ...@@ -1851,6 +1862,7 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
IOCB_t *irsp; IOCB_t *irsp;
struct lpfc_nodelist *ndlp; struct lpfc_nodelist *ndlp;
LPFC_MBOXQ_t *mbox = NULL; LPFC_MBOXQ_t *mbox = NULL;
struct lpfc_dmabuf *mp;
irsp = &rspiocb->iocb; irsp = &rspiocb->iocb;
...@@ -1862,6 +1874,11 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, ...@@ -1862,6 +1874,11 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
/* Check to see if link went down during discovery */ /* Check to see if link went down during discovery */
if ((lpfc_els_chk_latt(phba)) || !ndlp) { if ((lpfc_els_chk_latt(phba)) || !ndlp) {
if (mbox) { if (mbox) {
mp = (struct lpfc_dmabuf *) mbox->context1;
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
mempool_free( mbox, phba->mbox_mem_pool); mempool_free( mbox, phba->mbox_mem_pool);
} }
goto out; goto out;
...@@ -1893,9 +1910,7 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, ...@@ -1893,9 +1910,7 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
} }
/* NOTE: we should have messages for unsuccessful /* NOTE: we should have messages for unsuccessful
reglogin */ reglogin */
mempool_free( mbox, phba->mbox_mem_pool);
} else { } else {
mempool_free( mbox, phba->mbox_mem_pool);
/* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */ /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
...@@ -1907,6 +1922,12 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, ...@@ -1907,6 +1922,12 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
} }
} }
} }
mp = (struct lpfc_dmabuf *) mbox->context1;
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
mempool_free(mbox, phba->mbox_mem_pool);
} }
out: out:
if (ndlp) { if (ndlp) {
...@@ -2644,6 +2665,7 @@ lpfc_els_handle_rscn(struct lpfc_hba * phba) ...@@ -2644,6 +2665,7 @@ lpfc_els_handle_rscn(struct lpfc_hba * phba)
ndlp->nlp_type |= NLP_FABRIC; ndlp->nlp_type |= NLP_FABRIC;
ndlp->nlp_prev_state = ndlp->nlp_state; ndlp->nlp_prev_state = ndlp->nlp_state;
ndlp->nlp_state = NLP_STE_PLOGI_ISSUE; ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
lpfc_issue_els_plogi(phba, NameServer_DID, 0); lpfc_issue_els_plogi(phba, NameServer_DID, 0);
/* Wait for NameServer login cmpl before we can /* Wait for NameServer login cmpl before we can
continue */ continue */
...@@ -3039,7 +3061,7 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba, ...@@ -3039,7 +3061,7 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
/* FARP-REQ received from DID <did> */ /* FARP-REQ received from DID <did> */
lpfc_printf_log(phba, lpfc_printf_log(phba,
KERN_INFO, KERN_INFO,
LOG_IP, LOG_ELS,
"%d:0601 FARP-REQ received from DID x%x\n", "%d:0601 FARP-REQ received from DID x%x\n",
phba->brd_no, did); phba->brd_no, did);
...@@ -3101,7 +3123,7 @@ lpfc_els_rcv_farpr(struct lpfc_hba * phba, ...@@ -3101,7 +3123,7 @@ lpfc_els_rcv_farpr(struct lpfc_hba * phba,
/* FARP-RSP received from DID <did> */ /* FARP-RSP received from DID <did> */
lpfc_printf_log(phba, lpfc_printf_log(phba,
KERN_INFO, KERN_INFO,
LOG_IP, LOG_ELS,
"%d:0600 FARP-RSP received from DID x%x\n", "%d:0600 FARP-RSP received from DID x%x\n",
phba->brd_no, did); phba->brd_no, did);
......
...@@ -525,7 +525,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) ...@@ -525,7 +525,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
psli = &phba->sli; psli = &phba->sli;
mb = &pmb->mb; mb = &pmb->mb;
/* Since we don't do discovery right now, turn these off here */ /* Since we don't do discovery right now, turn these off here */
psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
...@@ -641,7 +641,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ...@@ -641,7 +641,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (rc == MBX_NOT_FINISHED) { if (rc == MBX_NOT_FINISHED) {
mempool_free(pmb, phba->mbox_mem_pool); mempool_free(pmb, phba->mbox_mem_pool);
lpfc_disc_flush_list(phba); lpfc_disc_flush_list(phba);
psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
phba->hba_state = LPFC_HBA_READY; phba->hba_state = LPFC_HBA_READY;
...@@ -672,6 +672,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) ...@@ -672,6 +672,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt, memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
sizeof (struct serv_parm)); sizeof (struct serv_parm));
if (phba->cfg_soft_wwnn)
u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
if (phba->cfg_soft_wwpn) if (phba->cfg_soft_wwpn)
u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
memcpy((uint8_t *) & phba->fc_nodename, memcpy((uint8_t *) & phba->fc_nodename,
...@@ -696,7 +698,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) ...@@ -696,7 +698,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
== MBX_NOT_FINISHED) { == MBX_NOT_FINISHED) {
mempool_free( pmb, phba->mbox_mem_pool); mempool_free( pmb, phba->mbox_mem_pool);
lpfc_disc_flush_list(phba); lpfc_disc_flush_list(phba);
psli->ring[(psli->ip_ring)].flag &= psli->ring[(psli->extra_ring)].flag &=
~LPFC_STOP_IOCB_EVENT; ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->fcp_ring)].flag &= psli->ring[(psli->fcp_ring)].flag &=
~LPFC_STOP_IOCB_EVENT; ~LPFC_STOP_IOCB_EVENT;
...@@ -715,6 +717,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) ...@@ -715,6 +717,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{ {
int i; int i;
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
struct lpfc_dmabuf *mp;
int rc;
sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
...@@ -793,16 +798,27 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) ...@@ -793,16 +798,27 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
if (sparam_mbox) { if (sparam_mbox) {
lpfc_read_sparam(phba, sparam_mbox); lpfc_read_sparam(phba, sparam_mbox);
sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
lpfc_sli_issue_mbox(phba, sparam_mbox, rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
(MBX_NOWAIT | MBX_STOP_IOCB)); (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(sparam_mbox, phba->mbox_mem_pool);
if (cfglink_mbox)
mempool_free(cfglink_mbox, phba->mbox_mem_pool);
return;
}
} }
if (cfglink_mbox) { if (cfglink_mbox) {
phba->hba_state = LPFC_LOCAL_CFG_LINK; phba->hba_state = LPFC_LOCAL_CFG_LINK;
lpfc_config_link(phba, cfglink_mbox); lpfc_config_link(phba, cfglink_mbox);
cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
lpfc_sli_issue_mbox(phba, cfglink_mbox, rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
(MBX_NOWAIT | MBX_STOP_IOCB)); (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED)
mempool_free(cfglink_mbox, phba->mbox_mem_pool);
} }
} }
...@@ -1067,6 +1083,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) ...@@ -1067,6 +1083,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID); lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN); lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID); lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID);
} }
phba->fc_ns_retry = 0; phba->fc_ns_retry = 0;
...@@ -1423,7 +1440,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba, ...@@ -1423,7 +1440,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
if (iocb->context1 == (uint8_t *) ndlp) if (iocb->context1 == (uint8_t *) ndlp)
return 1; return 1;
} }
} else if (pring->ringno == psli->ip_ring) { } else if (pring->ringno == psli->extra_ring) {
} else if (pring->ringno == psli->fcp_ring) { } else if (pring->ringno == psli->fcp_ring) {
/* Skip match check if waiting to relogin to FCP target */ /* Skip match check if waiting to relogin to FCP target */
...@@ -1680,112 +1697,38 @@ lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did) ...@@ -1680,112 +1697,38 @@ lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
struct lpfc_nodelist * struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
{ {
struct lpfc_nodelist *ndlp, *next_ndlp; struct lpfc_nodelist *ndlp;
struct list_head *lists[]={&phba->fc_nlpunmap_list,
&phba->fc_nlpmap_list,
&phba->fc_plogi_list,
&phba->fc_adisc_list,
&phba->fc_reglogin_list,
&phba->fc_prli_list,
&phba->fc_npr_list,
&phba->fc_unused_list};
uint32_t search[]={NLP_SEARCH_UNMAPPED,
NLP_SEARCH_MAPPED,
NLP_SEARCH_PLOGI,
NLP_SEARCH_ADISC,
NLP_SEARCH_REGLOGIN,
NLP_SEARCH_PRLI,
NLP_SEARCH_NPR,
NLP_SEARCH_UNUSED};
int i;
uint32_t data1; uint32_t data1;
spin_lock_irq(phba->host->host_lock); spin_lock_irq(phba->host->host_lock);
if (order & NLP_SEARCH_UNMAPPED) { for (i = 0; i < ARRAY_SIZE(lists); i++ ) {
list_for_each_entry_safe(ndlp, next_ndlp, if (!(order & search[i]))
&phba->fc_nlpunmap_list, nlp_listp) { continue;
if (lpfc_matchdid(phba, ndlp, did)) { list_for_each_entry(ndlp, lists[i], nlp_listp) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* FIND node DID unmapped */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0929 FIND node DID unmapped"
" Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
}
if (order & NLP_SEARCH_MAPPED) {
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* FIND node DID mapped */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0930 FIND node DID mapped "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
}
if (order & NLP_SEARCH_PLOGI) {
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* LOG change to PLOGI */
/* FIND node DID plogi */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0908 FIND node DID plogi "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
}
if (order & NLP_SEARCH_ADISC) {
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* LOG change to ADISC */
/* FIND node DID adisc */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0931 FIND node DID adisc "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
}
if (order & NLP_SEARCH_REGLOGIN) {
list_for_each_entry_safe(ndlp, next_ndlp,
&phba->fc_reglogin_list, nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) { if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) | data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) | ((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) | ((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff)); ((uint32_t) ndlp->nlp_rpi & 0xff));
/* LOG change to REGLOGIN */
/* FIND node DID reglogin */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE, lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0901 FIND node DID reglogin" "%d:0929 FIND node DID "
" Data: x%p x%x x%x x%x\n", " Data: x%p x%x x%x x%x\n",
phba->brd_no, phba->brd_no,
ndlp, ndlp->nlp_DID, ndlp, ndlp->nlp_DID,
...@@ -1795,86 +1738,12 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did) ...@@ -1795,86 +1738,12 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
} }
} }
} }
if (order & NLP_SEARCH_PRLI) {
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* LOG change to PRLI */
/* FIND node DID prli */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0902 FIND node DID prli "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
}
if (order & NLP_SEARCH_NPR) {
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* LOG change to NPR */
/* FIND node DID npr */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0903 FIND node DID npr "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
}
if (order & NLP_SEARCH_UNUSED) {
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
nlp_listp) {
if (lpfc_matchdid(phba, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
/* LOG change to UNUSED */
/* FIND node DID unused */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d:0905 FIND node DID unused "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
}
spin_unlock_irq(phba->host->host_lock); spin_unlock_irq(phba->host->host_lock);
/* FIND node did <did> NOT FOUND */ /* FIND node did <did> NOT FOUND */
lpfc_printf_log(phba, lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
KERN_INFO,
LOG_NODE,
"%d:0932 FIND node did x%x NOT FOUND Data: x%x\n", "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
phba->brd_no, did, order); phba->brd_no, did, order);
/* no match found */
return NULL; return NULL;
} }
...@@ -2036,7 +1905,7 @@ lpfc_disc_start(struct lpfc_hba * phba) ...@@ -2036,7 +1905,7 @@ lpfc_disc_start(struct lpfc_hba * phba)
if (rc == MBX_NOT_FINISHED) { if (rc == MBX_NOT_FINISHED) {
mempool_free( mbox, phba->mbox_mem_pool); mempool_free( mbox, phba->mbox_mem_pool);
lpfc_disc_flush_list(phba); lpfc_disc_flush_list(phba);
psli->ring[(psli->ip_ring)].flag &= psli->ring[(psli->extra_ring)].flag &=
~LPFC_STOP_IOCB_EVENT; ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->fcp_ring)].flag &= psli->ring[(psli->fcp_ring)].flag &=
~LPFC_STOP_IOCB_EVENT; ~LPFC_STOP_IOCB_EVENT;
...@@ -2415,7 +2284,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba) ...@@ -2415,7 +2284,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
if (clrlaerr) { if (clrlaerr) {
lpfc_disc_flush_list(phba); lpfc_disc_flush_list(phba);
psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
phba->hba_state = LPFC_HBA_READY; phba->hba_state = LPFC_HBA_READY;
......
...@@ -42,14 +42,14 @@ ...@@ -42,14 +42,14 @@
#define FCELSSIZE 1024 /* maximum ELS transfer size */ #define FCELSSIZE 1024 /* maximum ELS transfer size */
#define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */ #define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */
#define LPFC_IP_RING 1 /* ring 1 for IP commands */ #define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */
#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */ #define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
#define LPFC_FCP_NEXT_RING 3 #define LPFC_FCP_NEXT_RING 3
#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */ #define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */ #define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
#define SLI2_IOCB_CMD_R1_ENTRIES 4 /* SLI-2 IP command ring entries */ #define SLI2_IOCB_CMD_R1_ENTRIES 4 /* SLI-2 extra command ring entries */
#define SLI2_IOCB_RSP_R1_ENTRIES 4 /* SLI-2 IP response ring entries */ #define SLI2_IOCB_RSP_R1_ENTRIES 4 /* SLI-2 extra response ring entries */
#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36 /* SLI-2 extra FCP cmd ring entries */ #define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36 /* SLI-2 extra FCP cmd ring entries */
#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52 /* SLI-2 extra FCP rsp ring entries */ #define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52 /* SLI-2 extra FCP rsp ring entries */
#define SLI2_IOCB_CMD_R2_ENTRIES 20 /* SLI-2 ELS command ring entries */ #define SLI2_IOCB_CMD_R2_ENTRIES 20 /* SLI-2 ELS command ring entries */
...@@ -121,6 +121,20 @@ struct lpfc_sli_ct_request { ...@@ -121,6 +121,20 @@ struct lpfc_sli_ct_request {
uint32_t rsvd[7]; uint32_t rsvd[7];
} rft; } rft;
struct rff {
uint32_t PortId;
uint8_t reserved[2];
#ifdef __BIG_ENDIAN_BITFIELD
uint8_t feature_res:6;
uint8_t feature_init:1;
uint8_t feature_tgt:1;
#else /* __LITTLE_ENDIAN_BITFIELD */
uint8_t feature_tgt:1;
uint8_t feature_init:1;
uint8_t feature_res:6;
#endif
uint8_t type_code; /* type=8 for FCP */
} rff;
struct rnn { struct rnn {
uint32_t PortId; /* For RNN_ID requests */ uint32_t PortId; /* For RNN_ID requests */
uint8_t wwnn[8]; uint8_t wwnn[8];
...@@ -136,6 +150,7 @@ struct lpfc_sli_ct_request { ...@@ -136,6 +150,7 @@ struct lpfc_sli_ct_request {
#define SLI_CT_REVISION 1 #define SLI_CT_REVISION 1
#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260) #define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260)
#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228) #define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228)
#define RFF_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 235)
#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252) #define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252)
#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request)) #define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request))
...@@ -225,6 +240,7 @@ struct lpfc_sli_ct_request { ...@@ -225,6 +240,7 @@ struct lpfc_sli_ct_request {
#define SLI_CTNS_RNN_ID 0x0213 #define SLI_CTNS_RNN_ID 0x0213
#define SLI_CTNS_RCS_ID 0x0214 #define SLI_CTNS_RCS_ID 0x0214
#define SLI_CTNS_RFT_ID 0x0217 #define SLI_CTNS_RFT_ID 0x0217
#define SLI_CTNS_RFF_ID 0x021F
#define SLI_CTNS_RSPN_ID 0x0218 #define SLI_CTNS_RSPN_ID 0x0218
#define SLI_CTNS_RPT_ID 0x021A #define SLI_CTNS_RPT_ID 0x021A
#define SLI_CTNS_RIP_NN 0x0235 #define SLI_CTNS_RIP_NN 0x0235
...@@ -1089,12 +1105,6 @@ typedef struct { ...@@ -1089,12 +1105,6 @@ typedef struct {
#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 #define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
#define PCI_SUBSYSTEM_ID_LP11000S 0xfc11
#define PCI_SUBSYSTEM_ID_LP11002S 0xfc12
#define PCI_SUBSYSTEM_ID_LPE11000S 0xfc21
#define PCI_SUBSYSTEM_ID_LPE11002S 0xfc22
#define PCI_SUBSYSTEM_ID_LPE11010S 0xfc2A
#define JEDEC_ID_ADDRESS 0x0080001c #define JEDEC_ID_ADDRESS 0x0080001c
#define FIREFLY_JEDEC_ID 0x1ACC #define FIREFLY_JEDEC_ID 0x1ACC
#define SUPERFLY_JEDEC_ID 0x0020 #define SUPERFLY_JEDEC_ID 0x0020
...@@ -1284,6 +1294,10 @@ typedef struct { /* FireFly BIU registers */ ...@@ -1284,6 +1294,10 @@ typedef struct { /* FireFly BIU registers */
#define CMD_FCP_IREAD_CX 0x1B #define CMD_FCP_IREAD_CX 0x1B
#define CMD_FCP_ICMND_CR 0x1C #define CMD_FCP_ICMND_CR 0x1C
#define CMD_FCP_ICMND_CX 0x1D #define CMD_FCP_ICMND_CX 0x1D
#define CMD_FCP_TSEND_CX 0x1F
#define CMD_FCP_TRECEIVE_CX 0x21
#define CMD_FCP_TRSP_CX 0x23
#define CMD_FCP_AUTO_TRSP_CX 0x29
#define CMD_ADAPTER_MSG 0x20 #define CMD_ADAPTER_MSG 0x20
#define CMD_ADAPTER_DUMP 0x22 #define CMD_ADAPTER_DUMP 0x22
...@@ -1310,6 +1324,9 @@ typedef struct { /* FireFly BIU registers */ ...@@ -1310,6 +1324,9 @@ typedef struct { /* FireFly BIU registers */
#define CMD_FCP_IREAD64_CX 0x9B #define CMD_FCP_IREAD64_CX 0x9B
#define CMD_FCP_ICMND64_CR 0x9C #define CMD_FCP_ICMND64_CR 0x9C
#define CMD_FCP_ICMND64_CX 0x9D #define CMD_FCP_ICMND64_CX 0x9D
#define CMD_FCP_TSEND64_CX 0x9F
#define CMD_FCP_TRECEIVE64_CX 0xA1
#define CMD_FCP_TRSP64_CX 0xA3
#define CMD_GEN_REQUEST64_CR 0xC2 #define CMD_GEN_REQUEST64_CR 0xC2
#define CMD_GEN_REQUEST64_CX 0xC3 #define CMD_GEN_REQUEST64_CX 0xC3
......
...@@ -268,6 +268,8 @@ lpfc_config_port_post(struct lpfc_hba * phba) ...@@ -268,6 +268,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
kfree(mp); kfree(mp);
pmb->context1 = NULL; pmb->context1 = NULL;
if (phba->cfg_soft_wwnn)
u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
if (phba->cfg_soft_wwpn) if (phba->cfg_soft_wwpn)
u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName, memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
...@@ -349,8 +351,8 @@ lpfc_config_port_post(struct lpfc_hba * phba) ...@@ -349,8 +351,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
phba->hba_state = LPFC_LINK_DOWN; phba->hba_state = LPFC_LINK_DOWN;
/* Only process IOCBs on ring 0 till hba_state is READY */ /* Only process IOCBs on ring 0 till hba_state is READY */
if (psli->ring[psli->ip_ring].cmdringaddr) if (psli->ring[psli->extra_ring].cmdringaddr)
psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT; psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
if (psli->ring[psli->fcp_ring].cmdringaddr) if (psli->ring[psli->fcp_ring].cmdringaddr)
psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
if (psli->ring[psli->next_ring].cmdringaddr) if (psli->ring[psli->next_ring].cmdringaddr)
...@@ -517,7 +519,8 @@ lpfc_handle_eratt(struct lpfc_hba * phba) ...@@ -517,7 +519,8 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
struct lpfc_sli_ring *pring; struct lpfc_sli_ring *pring;
uint32_t event_data; uint32_t event_data;
if (phba->work_hs & HS_FFER6) { if (phba->work_hs & HS_FFER6 ||
phba->work_hs & HS_FFER5) {
/* Re-establishing Link */ /* Re-establishing Link */
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"%d:1301 Re-establishing Link " "%d:1301 Re-establishing Link "
...@@ -611,7 +614,7 @@ lpfc_handle_latt(struct lpfc_hba * phba) ...@@ -611,7 +614,7 @@ lpfc_handle_latt(struct lpfc_hba * phba)
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)); rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) if (rc == MBX_NOT_FINISHED)
goto lpfc_handle_latt_free_mp; goto lpfc_handle_latt_free_mbuf;
/* Clear Link Attention in HA REG */ /* Clear Link Attention in HA REG */
spin_lock_irq(phba->host->host_lock); spin_lock_irq(phba->host->host_lock);
...@@ -621,6 +624,8 @@ lpfc_handle_latt(struct lpfc_hba * phba) ...@@ -621,6 +624,8 @@ lpfc_handle_latt(struct lpfc_hba * phba)
return; return;
lpfc_handle_latt_free_mbuf:
lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp: lpfc_handle_latt_free_mp:
kfree(mp); kfree(mp);
lpfc_handle_latt_free_pmb: lpfc_handle_latt_free_pmb:
...@@ -802,19 +807,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp) ...@@ -802,19 +807,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
{ {
lpfc_vpd_t *vp; lpfc_vpd_t *vp;
uint16_t dev_id = phba->pcidev->device; uint16_t dev_id = phba->pcidev->device;
uint16_t dev_subid = phba->pcidev->subsystem_device;
uint8_t hdrtype;
int max_speed; int max_speed;
char * ports;
struct { struct {
char * name; char * name;
int max_speed; int max_speed;
char * ports;
char * bus; char * bus;
} m = {"<Unknown>", 0, "", ""}; } m = {"<Unknown>", 0, ""};
pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
ports = (hdrtype == 0x80) ? "2-port " : "";
if (mdp && mdp[0] != '\0' if (mdp && mdp[0] != '\0'
&& descp && descp[0] != '\0') && descp && descp[0] != '\0')
return; return;
...@@ -834,130 +833,93 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp) ...@@ -834,130 +833,93 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
switch (dev_id) { switch (dev_id) {
case PCI_DEVICE_ID_FIREFLY: case PCI_DEVICE_ID_FIREFLY:
m = (typeof(m)){"LP6000", max_speed, "", "PCI"}; m = (typeof(m)){"LP6000", max_speed, "PCI"};
break; break;
case PCI_DEVICE_ID_SUPERFLY: case PCI_DEVICE_ID_SUPERFLY:
if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
m = (typeof(m)){"LP7000", max_speed, "", "PCI"}; m = (typeof(m)){"LP7000", max_speed, "PCI"};
else else
m = (typeof(m)){"LP7000E", max_speed, "", "PCI"}; m = (typeof(m)){"LP7000E", max_speed, "PCI"};
break; break;
case PCI_DEVICE_ID_DRAGONFLY: case PCI_DEVICE_ID_DRAGONFLY:
m = (typeof(m)){"LP8000", max_speed, "", "PCI"}; m = (typeof(m)){"LP8000", max_speed, "PCI"};
break; break;
case PCI_DEVICE_ID_CENTAUR: case PCI_DEVICE_ID_CENTAUR:
if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
m = (typeof(m)){"LP9002", max_speed, "", "PCI"}; m = (typeof(m)){"LP9002", max_speed, "PCI"};
else else
m = (typeof(m)){"LP9000", max_speed, "", "PCI"}; m = (typeof(m)){"LP9000", max_speed, "PCI"};
break; break;
case PCI_DEVICE_ID_RFLY: case PCI_DEVICE_ID_RFLY:
m = (typeof(m)){"LP952", max_speed, "", "PCI"}; m = (typeof(m)){"LP952", max_speed, "PCI"};
break; break;
case PCI_DEVICE_ID_PEGASUS: case PCI_DEVICE_ID_PEGASUS:
m = (typeof(m)){"LP9802", max_speed, "", "PCI-X"}; m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
break; break;
case PCI_DEVICE_ID_THOR: case PCI_DEVICE_ID_THOR:
if (hdrtype == 0x80) m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
m = (typeof(m)){"LP10000DC",
max_speed, ports, "PCI-X"};
else
m = (typeof(m)){"LP10000",
max_speed, ports, "PCI-X"};
break; break;
case PCI_DEVICE_ID_VIPER: case PCI_DEVICE_ID_VIPER:
m = (typeof(m)){"LPX1000", max_speed, "", "PCI-X"}; m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
break; break;
case PCI_DEVICE_ID_PFLY: case PCI_DEVICE_ID_PFLY:
m = (typeof(m)){"LP982", max_speed, "", "PCI-X"}; m = (typeof(m)){"LP982", max_speed, "PCI-X"};
break; break;
case PCI_DEVICE_ID_TFLY: case PCI_DEVICE_ID_TFLY:
if (hdrtype == 0x80) m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
m = (typeof(m)){"LP1050DC", max_speed, ports, "PCI-X"};
else
m = (typeof(m)){"LP1050", max_speed, ports, "PCI-X"};
break; break;
case PCI_DEVICE_ID_HELIOS: case PCI_DEVICE_ID_HELIOS:
if (hdrtype == 0x80) m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
m = (typeof(m)){"LP11002", max_speed, ports, "PCI-X2"};
else
m = (typeof(m)){"LP11000", max_speed, ports, "PCI-X2"};
break; break;
case PCI_DEVICE_ID_HELIOS_SCSP: case PCI_DEVICE_ID_HELIOS_SCSP:
m = (typeof(m)){"LP11000-SP", max_speed, ports, "PCI-X2"}; m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
break; break;
case PCI_DEVICE_ID_HELIOS_DCSP: case PCI_DEVICE_ID_HELIOS_DCSP:
m = (typeof(m)){"LP11002-SP", max_speed, ports, "PCI-X2"}; m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
break; break;
case PCI_DEVICE_ID_NEPTUNE: case PCI_DEVICE_ID_NEPTUNE:
if (hdrtype == 0x80) m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
m = (typeof(m)){"LPe1002", max_speed, ports, "PCIe"};
else
m = (typeof(m)){"LPe1000", max_speed, ports, "PCIe"};
break; break;
case PCI_DEVICE_ID_NEPTUNE_SCSP: case PCI_DEVICE_ID_NEPTUNE_SCSP:
m = (typeof(m)){"LPe1000-SP", max_speed, ports, "PCIe"}; m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
break; break;
case PCI_DEVICE_ID_NEPTUNE_DCSP: case PCI_DEVICE_ID_NEPTUNE_DCSP:
m = (typeof(m)){"LPe1002-SP", max_speed, ports, "PCIe"}; m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
break; break;
case PCI_DEVICE_ID_BMID: case PCI_DEVICE_ID_BMID:
m = (typeof(m)){"LP1150", max_speed, ports, "PCI-X2"}; m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
break; break;
case PCI_DEVICE_ID_BSMB: case PCI_DEVICE_ID_BSMB:
m = (typeof(m)){"LP111", max_speed, ports, "PCI-X2"}; m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
break; break;
case PCI_DEVICE_ID_ZEPHYR: case PCI_DEVICE_ID_ZEPHYR:
if (hdrtype == 0x80) m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
m = (typeof(m)){"LPe11002", max_speed, ports, "PCIe"};
else
m = (typeof(m)){"LPe11000", max_speed, ports, "PCIe"};
break; break;
case PCI_DEVICE_ID_ZEPHYR_SCSP: case PCI_DEVICE_ID_ZEPHYR_SCSP:
m = (typeof(m)){"LPe11000", max_speed, ports, "PCIe"}; m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
break; break;
case PCI_DEVICE_ID_ZEPHYR_DCSP: case PCI_DEVICE_ID_ZEPHYR_DCSP:
m = (typeof(m)){"LPe11002-SP", max_speed, ports, "PCIe"}; m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
break; break;
case PCI_DEVICE_ID_ZMID: case PCI_DEVICE_ID_ZMID:
m = (typeof(m)){"LPe1150", max_speed, ports, "PCIe"}; m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
break; break;
case PCI_DEVICE_ID_ZSMB: case PCI_DEVICE_ID_ZSMB:
m = (typeof(m)){"LPe111", max_speed, ports, "PCIe"}; m = (typeof(m)){"LPe111", max_speed, "PCIe"};
break; break;
case PCI_DEVICE_ID_LP101: case PCI_DEVICE_ID_LP101:
m = (typeof(m)){"LP101", max_speed, ports, "PCI-X"}; m = (typeof(m)){"LP101", max_speed, "PCI-X"};
break; break;
case PCI_DEVICE_ID_LP10000S: case PCI_DEVICE_ID_LP10000S:
m = (typeof(m)){"LP10000-S", max_speed, ports, "PCI"}; m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
break; break;
case PCI_DEVICE_ID_LP11000S: case PCI_DEVICE_ID_LP11000S:
m = (typeof(m)){"LP11000-S", max_speed,
"PCI-X2"};
break;
case PCI_DEVICE_ID_LPE11000S: case PCI_DEVICE_ID_LPE11000S:
switch (dev_subid) { m = (typeof(m)){"LPe11000-S", max_speed,
case PCI_SUBSYSTEM_ID_LP11000S: "PCIe"};
m = (typeof(m)){"LP11000-S", max_speed,
ports, "PCI-X2"};
break;
case PCI_SUBSYSTEM_ID_LP11002S:
m = (typeof(m)){"LP11002-S", max_speed,
ports, "PCI-X2"};
break;
case PCI_SUBSYSTEM_ID_LPE11000S:
m = (typeof(m)){"LPe11000-S", max_speed,
ports, "PCIe"};
break;
case PCI_SUBSYSTEM_ID_LPE11002S:
m = (typeof(m)){"LPe11002-S", max_speed,
ports, "PCIe"};
break;
case PCI_SUBSYSTEM_ID_LPE11010S:
m = (typeof(m)){"LPe11010-S", max_speed,
"10-port ", "PCIe"};
break;
default:
m = (typeof(m)){ NULL };
break;
}
break; break;
default: default:
m = (typeof(m)){ NULL }; m = (typeof(m)){ NULL };
...@@ -968,8 +930,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp) ...@@ -968,8 +930,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
snprintf(mdp, 79,"%s", m.name); snprintf(mdp, 79,"%s", m.name);
if (descp && descp[0] == '\0') if (descp && descp[0] == '\0')
snprintf(descp, 255, snprintf(descp, 255,
"Emulex %s %dGb %s%s Fibre Channel Adapter", "Emulex %s %dGb %s Fibre Channel Adapter",
m.name, m.max_speed, m.ports, m.bus); m.name, m.max_speed, m.bus);
} }
/**************************************************/ /**************************************************/
...@@ -1651,6 +1613,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) ...@@ -1651,6 +1613,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
if (error) if (error)
goto out_remove_host; goto out_remove_host;
if (phba->cfg_use_msi) {
error = pci_enable_msi(phba->pcidev);
if (error)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "%d:0452 "
"Enable MSI failed, continuing with "
"IRQ\n", phba->brd_no);
}
error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
LPFC_DRIVER_NAME, phba); LPFC_DRIVER_NAME, phba);
if (error) { if (error) {
...@@ -1730,6 +1700,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) ...@@ -1730,6 +1700,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_stop_timer(phba); lpfc_stop_timer(phba);
phba->work_hba_events = 0; phba->work_hba_events = 0;
free_irq(phba->pcidev->irq, phba); free_irq(phba->pcidev->irq, phba);
pci_disable_msi(phba->pcidev);
out_free_sysfs_attr: out_free_sysfs_attr:
lpfc_free_sysfs_attr(phba); lpfc_free_sysfs_attr(phba);
out_remove_host: out_remove_host:
...@@ -1796,6 +1767,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) ...@@ -1796,6 +1767,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
/* Release the irq reservation */ /* Release the irq reservation */
free_irq(phba->pcidev->irq, phba); free_irq(phba->pcidev->irq, phba);
pci_disable_msi(phba->pcidev);
lpfc_cleanup(phba, 0); lpfc_cleanup(phba, 0);
lpfc_stop_timer(phba); lpfc_stop_timer(phba);
......
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#define LOG_NODE 0x80 /* Node table events */ #define LOG_NODE 0x80 /* Node table events */
#define LOG_MISC 0x400 /* Miscellaneous events */ #define LOG_MISC 0x400 /* Miscellaneous events */
#define LOG_SLI 0x800 /* SLI events */ #define LOG_SLI 0x800 /* SLI events */
#define LOG_CHK_COND 0x1000 /* FCP Check condition flag */ #define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
#define LOG_LIBDFC 0x2000 /* Libdfc events */ #define LOG_LIBDFC 0x2000 /* Libdfc events */
#define LOG_ALL_MSG 0xffff /* LOG all messages */ #define LOG_ALL_MSG 0xffff /* LOG all messages */
......
...@@ -739,7 +739,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba, ...@@ -739,7 +739,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
uint32_t evt) uint32_t evt)
{ {
struct lpfc_iocbq *cmdiocb, *rspiocb; struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_dmabuf *pcmd, *prsp; struct lpfc_dmabuf *pcmd, *prsp, *mp;
uint32_t *lp; uint32_t *lp;
IOCB_t *irsp; IOCB_t *irsp;
struct serv_parm *sp; struct serv_parm *sp;
...@@ -829,6 +829,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba, ...@@ -829,6 +829,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
NLP_REGLOGIN_LIST); NLP_REGLOGIN_LIST);
return ndlp->nlp_state; return ndlp->nlp_state;
} }
mp = (struct lpfc_dmabuf *)mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(mbox, phba->mbox_mem_pool); mempool_free(mbox, phba->mbox_mem_pool);
} else { } else {
mempool_free(mbox, phba->mbox_mem_pool); mempool_free(mbox, phba->mbox_mem_pool);
...@@ -1620,8 +1623,8 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba, ...@@ -1620,8 +1623,8 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
* or discovery in progress for this node. Starting discovery * or discovery in progress for this node. Starting discovery
* here will affect the counting of discovery threads. * here will affect the counting of discovery threads.
*/ */
if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) && if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC)){ !(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
if (ndlp->nlp_flag & NLP_NPR_ADISC) { if (ndlp->nlp_flag & NLP_NPR_ADISC) {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE; ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
ndlp->nlp_state = NLP_STE_ADISC_ISSUE; ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
......
...@@ -297,8 +297,10 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd) ...@@ -297,8 +297,10 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm; uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
uint32_t resp_info = fcprsp->rspStatus2; uint32_t resp_info = fcprsp->rspStatus2;
uint32_t scsi_status = fcprsp->rspStatus3; uint32_t scsi_status = fcprsp->rspStatus3;
uint32_t *lp;
uint32_t host_status = DID_OK; uint32_t host_status = DID_OK;
uint32_t rsplen = 0; uint32_t rsplen = 0;
uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
/* /*
* If this is a task management command, there is no * If this is a task management command, there is no
...@@ -310,10 +312,25 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd) ...@@ -310,10 +312,25 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
goto out; goto out;
} }
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
"%d:0730 FCP command failed: RSP " uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
"Data: x%x x%x x%x x%x x%x x%x\n", if (snslen > SCSI_SENSE_BUFFERSIZE)
phba->brd_no, resp_info, scsi_status, snslen = SCSI_SENSE_BUFFERSIZE;
if (resp_info & RSP_LEN_VALID)
rsplen = be32_to_cpu(fcprsp->rspRspLen);
memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
}
lp = (uint32_t *)cmnd->sense_buffer;
if (!scsi_status && (resp_info & RESID_UNDER))
logit = LOG_FCP;
lpfc_printf_log(phba, KERN_WARNING, logit,
"%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
"Data: x%x x%x x%x x%x x%x\n",
phba->brd_no, cmnd->cmnd[0], scsi_status,
be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
be32_to_cpu(fcprsp->rspResId), be32_to_cpu(fcprsp->rspResId),
be32_to_cpu(fcprsp->rspSnsLen), be32_to_cpu(fcprsp->rspSnsLen),
be32_to_cpu(fcprsp->rspRspLen), be32_to_cpu(fcprsp->rspRspLen),
...@@ -328,14 +345,6 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd) ...@@ -328,14 +345,6 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
} }
} }
if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
if (snslen > SCSI_SENSE_BUFFERSIZE)
snslen = SCSI_SENSE_BUFFERSIZE;
memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
}
cmnd->resid = 0; cmnd->resid = 0;
if (resp_info & RESID_UNDER) { if (resp_info & RESID_UNDER) {
cmnd->resid = be32_to_cpu(fcprsp->rspResId); cmnd->resid = be32_to_cpu(fcprsp->rspResId);
...@@ -378,7 +387,7 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd) ...@@ -378,7 +387,7 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
*/ */
} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
(cmnd->sc_data_direction == DMA_FROM_DEVICE)) { (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
"%d:0734 FCP Read Check Error Data: " "%d:0734 FCP Read Check Error Data: "
"x%x x%x x%x x%x\n", phba->brd_no, "x%x x%x x%x x%x\n", phba->brd_no,
be32_to_cpu(fcpcmd->fcpDl), be32_to_cpu(fcpcmd->fcpDl),
...@@ -670,6 +679,9 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, ...@@ -670,6 +679,9 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
struct lpfc_iocbq *iocbqrsp; struct lpfc_iocbq *iocbqrsp;
int ret; int ret;
if (!rdata->pnode)
return FAILED;
lpfc_cmd->rdata = rdata; lpfc_cmd->rdata = rdata;
ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun, ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
FCP_TARGET_RESET); FCP_TARGET_RESET);
...@@ -976,20 +988,34 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) ...@@ -976,20 +988,34 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
lpfc_block_error_handler(cmnd); lpfc_block_error_handler(cmnd);
spin_lock_irq(shost->host_lock); spin_lock_irq(shost->host_lock);
loopcnt = 0;
/* /*
* If target is not in a MAPPED state, delay the reset until * If target is not in a MAPPED state, delay the reset until
* target is rediscovered or devloss timeout expires. * target is rediscovered or devloss timeout expires.
*/ */
while ( 1 ) { while ( 1 ) {
if (!pnode) if (!pnode)
break; return FAILED;
if (pnode->nlp_state != NLP_STE_MAPPED_NODE) { if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
spin_unlock_irq(phba->host->host_lock); spin_unlock_irq(phba->host->host_lock);
schedule_timeout_uninterruptible(msecs_to_jiffies(500)); schedule_timeout_uninterruptible(msecs_to_jiffies(500));
spin_lock_irq(phba->host->host_lock); spin_lock_irq(phba->host->host_lock);
loopcnt++;
rdata = cmnd->device->hostdata;
if (!rdata ||
(loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0721 LUN Reset rport failure:"
" cnt x%x rdata x%p\n",
phba->brd_no, loopcnt, rdata);
goto out;
}
pnode = rdata->pnode;
if (!pnode)
return FAILED;
} }
if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE)) if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
break; break;
} }
......
...@@ -117,6 +117,10 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) ...@@ -117,6 +117,10 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case CMD_FCP_IREAD_CX: case CMD_FCP_IREAD_CX:
case CMD_FCP_ICMND_CR: case CMD_FCP_ICMND_CR:
case CMD_FCP_ICMND_CX: case CMD_FCP_ICMND_CX:
case CMD_FCP_TSEND_CX:
case CMD_FCP_TRSP_CX:
case CMD_FCP_TRECEIVE_CX:
case CMD_FCP_AUTO_TRSP_CX:
case CMD_ADAPTER_MSG: case CMD_ADAPTER_MSG:
case CMD_ADAPTER_DUMP: case CMD_ADAPTER_DUMP:
case CMD_XMIT_SEQUENCE64_CR: case CMD_XMIT_SEQUENCE64_CR:
...@@ -131,6 +135,9 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) ...@@ -131,6 +135,9 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case CMD_FCP_IREAD64_CX: case CMD_FCP_IREAD64_CX:
case CMD_FCP_ICMND64_CR: case CMD_FCP_ICMND64_CR:
case CMD_FCP_ICMND64_CX: case CMD_FCP_ICMND64_CX:
case CMD_FCP_TSEND64_CX:
case CMD_FCP_TRSP64_CX:
case CMD_FCP_TRECEIVE64_CX:
case CMD_GEN_REQUEST64_CR: case CMD_GEN_REQUEST64_CR:
case CMD_GEN_REQUEST64_CX: case CMD_GEN_REQUEST64_CX:
case CMD_XMIT_ELS_RSP64_CX: case CMD_XMIT_ELS_RSP64_CX:
...@@ -1098,6 +1105,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, ...@@ -1098,6 +1105,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
lpfc_sli_pcimem_bcopy((uint32_t *) entry, lpfc_sli_pcimem_bcopy((uint32_t *) entry,
(uint32_t *) &rspiocbq.iocb, (uint32_t *) &rspiocbq.iocb,
sizeof (IOCB_t)); sizeof (IOCB_t));
INIT_LIST_HEAD(&(rspiocbq.list));
irsp = &rspiocbq.iocb; irsp = &rspiocbq.iocb;
type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
...@@ -1149,6 +1157,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, ...@@ -1149,6 +1157,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
} }
} }
break; break;
case LPFC_UNSOL_IOCB:
spin_unlock_irqrestore(phba->host->host_lock, iflag);
lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
spin_lock_irqsave(phba->host->host_lock, iflag);
break;
default: default:
if (irsp->ulpCommand == CMD_ADAPTER_MSG) { if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
char adaptermsg[LPFC_MAX_ADPTMSG]; char adaptermsg[LPFC_MAX_ADPTMSG];
...@@ -2472,13 +2485,17 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba) ...@@ -2472,13 +2485,17 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
psli = &phba->sli; psli = &phba->sli;
/* Adjust cmd/rsp ring iocb entries more evenly */ /* Adjust cmd/rsp ring iocb entries more evenly */
/* Take some away from the FCP ring */
pring = &psli->ring[psli->fcp_ring]; pring = &psli->ring[psli->fcp_ring];
pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
pring = &psli->ring[1]; /* and give them to the extra ring */
pring = &psli->ring[psli->extra_ring];
pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
...@@ -2488,8 +2505,8 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba) ...@@ -2488,8 +2505,8 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
pring->iotag_max = 4096; pring->iotag_max = 4096;
pring->num_mask = 1; pring->num_mask = 1;
pring->prt[0].profile = 0; /* Mask 0 */ pring->prt[0].profile = 0; /* Mask 0 */
pring->prt[0].rctl = FC_UNSOL_DATA; pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
pring->prt[0].type = 5; pring->prt[0].type = phba->cfg_multi_ring_type;
pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
return 0; return 0;
} }
...@@ -2505,7 +2522,7 @@ lpfc_sli_setup(struct lpfc_hba *phba) ...@@ -2505,7 +2522,7 @@ lpfc_sli_setup(struct lpfc_hba *phba)
psli->sli_flag = 0; psli->sli_flag = 0;
psli->fcp_ring = LPFC_FCP_RING; psli->fcp_ring = LPFC_FCP_RING;
psli->next_ring = LPFC_FCP_NEXT_RING; psli->next_ring = LPFC_FCP_NEXT_RING;
psli->ip_ring = LPFC_IP_RING; psli->extra_ring = LPFC_EXTRA_RING;
psli->iocbq_lookup = NULL; psli->iocbq_lookup = NULL;
psli->iocbq_lookup_len = 0; psli->iocbq_lookup_len = 0;
...@@ -2528,7 +2545,7 @@ lpfc_sli_setup(struct lpfc_hba *phba) ...@@ -2528,7 +2545,7 @@ lpfc_sli_setup(struct lpfc_hba *phba)
pring->fast_iotag = pring->iotag_max; pring->fast_iotag = pring->iotag_max;
pring->num_mask = 0; pring->num_mask = 0;
break; break;
case LPFC_IP_RING: /* ring 1 - IP */ case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
/* numCiocb and numRiocb are used in config_port */ /* numCiocb and numRiocb are used in config_port */
pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
...@@ -3238,6 +3255,21 @@ lpfc_intr_handler(int irq, void *dev_id) ...@@ -3238,6 +3255,21 @@ lpfc_intr_handler(int irq, void *dev_id)
lpfc_sli_handle_fast_ring_event(phba, lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_FCP_RING], &phba->sli.ring[LPFC_FCP_RING],
status); status);
if (phba->cfg_multi_ring_support == 2) {
/*
* Process all events on extra ring. Take the optimized path
* for extra ring IO. Any other IO is slow path and is handled
* by the worker thread.
*/
status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
status >>= (4*LPFC_EXTRA_RING);
if (status & HA_RXATT) {
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_EXTRA_RING],
status);
}
}
return IRQ_HANDLED; return IRQ_HANDLED;
} /* lpfc_intr_handler */ } /* lpfc_intr_handler */
...@@ -198,7 +198,7 @@ struct lpfc_sli { ...@@ -198,7 +198,7 @@ struct lpfc_sli {
int fcp_ring; /* ring used for FCP initiator commands */ int fcp_ring; /* ring used for FCP initiator commands */
int next_ring; int next_ring;
int ip_ring; /* ring used for IP network drv cmds */ int extra_ring; /* extra ring used for other protocols */
struct lpfc_sli_stat slistat; /* SLI statistical info */ struct lpfc_sli_stat slistat; /* SLI statistical info */
struct list_head mboxq; struct list_head mboxq;
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
* included with this package. * * included with this package. *
*******************************************************************/ *******************************************************************/
#define LPFC_DRIVER_VERSION "8.1.10" #define LPFC_DRIVER_VERSION "8.1.11"
#define LPFC_DRIVER_NAME "lpfc" #define LPFC_DRIVER_NAME "lpfc"
......
...@@ -73,10 +73,10 @@ static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT; ...@@ -73,10 +73,10 @@ static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
module_param(max_mbox_busy_wait, ushort, 0); module_param(max_mbox_busy_wait, ushort, 0);
MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)"); MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
#define RDINDOOR(adapter) readl((adapter)->base + 0x20) #define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20)
#define RDOUTDOOR(adapter) readl((adapter)->base + 0x2C) #define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C)
#define WRINDOOR(adapter,value) writel(value, (adapter)->base + 0x20) #define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20)
#define WROUTDOOR(adapter,value) writel(value, (adapter)->base + 0x2C) #define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)
/* /*
* Global variables * Global variables
...@@ -1386,7 +1386,8 @@ megaraid_isr_memmapped(int irq, void *devp) ...@@ -1386,7 +1386,8 @@ megaraid_isr_memmapped(int irq, void *devp)
handled = 1; handled = 1;
while( RDINDOOR(adapter) & 0x02 ) cpu_relax(); while( RDINDOOR(adapter) & 0x02 )
cpu_relax();
mega_cmd_done(adapter, completed, nstatus, status); mega_cmd_done(adapter, completed, nstatus, status);
...@@ -4668,6 +4669,8 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -4668,6 +4669,8 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->host_no, mega_baseport, irq); host->host_no, mega_baseport, irq);
adapter->base = mega_baseport; adapter->base = mega_baseport;
if (flag & BOARD_MEMMAP)
adapter->mmio_base = (void __iomem *) mega_baseport;
INIT_LIST_HEAD(&adapter->free_list); INIT_LIST_HEAD(&adapter->free_list);
INIT_LIST_HEAD(&adapter->pending_list); INIT_LIST_HEAD(&adapter->pending_list);
......
...@@ -801,7 +801,8 @@ typedef struct { ...@@ -801,7 +801,8 @@ typedef struct {
clustering is available */ clustering is available */
u32 flag; u32 flag;
unsigned long base; unsigned long base;
void __iomem *mmio_base;
/* mbox64 with mbox not aligned on 16-byte boundary */ /* mbox64 with mbox not aligned on 16-byte boundary */
mbox64_t *una_mbox64; mbox64_t *una_mbox64;
......
...@@ -517,7 +517,7 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, ...@@ -517,7 +517,7 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
* Returns the number of frames required for number of sge's (sge_count) * Returns the number of frames required for number of sge's (sge_count)
*/ */
u32 megasas_get_frame_count(u8 sge_count) static u32 megasas_get_frame_count(u8 sge_count)
{ {
int num_cnt; int num_cnt;
int sge_bytes; int sge_bytes;
...@@ -1733,7 +1733,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance, ...@@ -1733,7 +1733,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
* *
* Tasklet to complete cmds * Tasklet to complete cmds
*/ */
void megasas_complete_cmd_dpc(unsigned long instance_addr) static void megasas_complete_cmd_dpc(unsigned long instance_addr)
{ {
u32 producer; u32 producer;
u32 consumer; u32 consumer;
......
...@@ -589,10 +589,12 @@ static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd) ...@@ -589,10 +589,12 @@ static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
static struct ncr_driver_setup static struct ncr_driver_setup
driver_setup = SCSI_NCR_DRIVER_SETUP; driver_setup = SCSI_NCR_DRIVER_SETUP;
#ifndef MODULE
#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT #ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
static struct ncr_driver_setup static struct ncr_driver_setup
driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP; driver_safe_setup __initdata = SCSI_NCR_DRIVER_SAFE_SETUP;
#endif #endif
#endif /* !MODULE */
#define initverbose (driver_setup.verbose) #define initverbose (driver_setup.verbose)
#define bootverbose (np->verbose) #define bootverbose (np->verbose)
...@@ -641,6 +643,13 @@ static struct ncr_driver_setup ...@@ -641,6 +643,13 @@ static struct ncr_driver_setup
#define OPT_IARB 26 #define OPT_IARB 26
#endif #endif
#ifdef MODULE
#define ARG_SEP ' '
#else
#define ARG_SEP ','
#endif
#ifndef MODULE
static char setup_token[] __initdata = static char setup_token[] __initdata =
"tags:" "mpar:" "tags:" "mpar:"
"spar:" "disc:" "spar:" "disc:"
...@@ -660,12 +669,6 @@ static char setup_token[] __initdata = ...@@ -660,12 +669,6 @@ static char setup_token[] __initdata =
#endif #endif
; /* DO NOT REMOVE THIS ';' */ ; /* DO NOT REMOVE THIS ';' */
#ifdef MODULE
#define ARG_SEP ' '
#else
#define ARG_SEP ','
#endif
static int __init get_setup_token(char *p) static int __init get_setup_token(char *p)
{ {
char *cur = setup_token; char *cur = setup_token;
...@@ -682,7 +685,6 @@ static int __init get_setup_token(char *p) ...@@ -682,7 +685,6 @@ static int __init get_setup_token(char *p)
return 0; return 0;
} }
static int __init sym53c8xx__setup(char *str) static int __init sym53c8xx__setup(char *str)
{ {
#ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT #ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
...@@ -804,6 +806,7 @@ static int __init sym53c8xx__setup(char *str) ...@@ -804,6 +806,7 @@ static int __init sym53c8xx__setup(char *str)
#endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */ #endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
return 1; return 1;
} }
#endif /* !MODULE */
/*=================================================================== /*===================================================================
** **
...@@ -8321,12 +8324,12 @@ char *ncr53c8xx; /* command line passed by insmod */ ...@@ -8321,12 +8324,12 @@ char *ncr53c8xx; /* command line passed by insmod */
module_param(ncr53c8xx, charp, 0); module_param(ncr53c8xx, charp, 0);
#endif #endif
#ifndef MODULE
static int __init ncr53c8xx_setup(char *str) static int __init ncr53c8xx_setup(char *str)
{ {
return sym53c8xx__setup(str); return sym53c8xx__setup(str);
} }
#ifndef MODULE
__setup("ncr53c8xx=", ncr53c8xx_setup); __setup("ncr53c8xx=", ncr53c8xx_setup);
#endif #endif
......
...@@ -390,7 +390,7 @@ static struct sysfs_entry { ...@@ -390,7 +390,7 @@ static struct sysfs_entry {
{ "optrom_ctl", &sysfs_optrom_ctl_attr, }, { "optrom_ctl", &sysfs_optrom_ctl_attr, },
{ "vpd", &sysfs_vpd_attr, 1 }, { "vpd", &sysfs_vpd_attr, 1 },
{ "sfp", &sysfs_sfp_attr, 1 }, { "sfp", &sysfs_sfp_attr, 1 },
{ 0 }, { NULL },
}; };
void void
......
...@@ -59,9 +59,6 @@ int ...@@ -59,9 +59,6 @@ int
qla2x00_initialize_adapter(scsi_qla_host_t *ha) qla2x00_initialize_adapter(scsi_qla_host_t *ha)
{ {
int rval; int rval;
uint8_t restart_risc = 0;
uint8_t retry;
uint32_t wait_time;
/* Clear adapter flags. */ /* Clear adapter flags. */
ha->flags.online = 0; ha->flags.online = 0;
...@@ -104,87 +101,15 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha) ...@@ -104,87 +101,15 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n"); qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
retry = 10; if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
/* rval = ha->isp_ops.chip_diag(ha);
* Try to configure the loop. if (rval)
*/ return (rval);
do { rval = qla2x00_setup_chip(ha);
restart_risc = 0; if (rval)
return (rval);
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
if ((rval = ha->isp_ops.chip_diag(ha)) == QLA_SUCCESS) {
rval = qla2x00_setup_chip(ha);
}
}
if (rval == QLA_SUCCESS &&
(rval = qla2x00_init_rings(ha)) == QLA_SUCCESS) {
check_fw_ready_again:
/*
* Wait for a successful LIP up to a maximum
* of (in seconds): RISC login timeout value,
* RISC retry count value, and port down retry
* value OR a minimum of 4 seconds OR If no
* cable, only 5 seconds.
*/
rval = qla2x00_fw_ready(ha);
if (rval == QLA_SUCCESS) {
clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
/* Issue a marker after FW becomes ready. */
qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
/*
* Wait at most MAX_TARGET RSCNs for a stable
* link.
*/
wait_time = 256;
do {
clear_bit(LOOP_RESYNC_NEEDED,
&ha->dpc_flags);
rval = qla2x00_configure_loop(ha);
if (test_and_clear_bit(ISP_ABORT_NEEDED,
&ha->dpc_flags)) {
restart_risc = 1;
break;
}
/*
* If loop state changed while we were
* discovering devices then wait for
* LIP to complete
*/
if (atomic_read(&ha->loop_state) !=
LOOP_READY && retry--) {
goto check_fw_ready_again;
}
wait_time--;
} while (!atomic_read(&ha->loop_down_timer) &&
retry &&
wait_time &&
(test_bit(LOOP_RESYNC_NEEDED,
&ha->dpc_flags)));
if (wait_time == 0)
rval = QLA_FUNCTION_FAILED;
} else if (ha->device_flags & DFLG_NO_CABLE)
/* If no cable, then all is good. */
rval = QLA_SUCCESS;
}
} while (restart_risc && retry--);
if (rval == QLA_SUCCESS) {
clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
ha->marker_needed = 0;
ha->flags.online = 1;
} else {
DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
} }
rval = qla2x00_init_rings(ha);
return (rval); return (rval);
} }
...@@ -2208,8 +2133,7 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport) ...@@ -2208,8 +2133,7 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
atomic_set(&fcport->state, FCS_ONLINE); atomic_set(&fcport->state, FCS_ONLINE);
if (ha->flags.init_done) qla2x00_reg_remote_port(ha, fcport);
qla2x00_reg_remote_port(ha, fcport);
} }
void void
......
...@@ -95,6 +95,8 @@ MODULE_PARM_DESC(ql2xqfullrampup, ...@@ -95,6 +95,8 @@ MODULE_PARM_DESC(ql2xqfullrampup,
*/ */
static int qla2xxx_slave_configure(struct scsi_device * device); static int qla2xxx_slave_configure(struct scsi_device * device);
static int qla2xxx_slave_alloc(struct scsi_device *); static int qla2xxx_slave_alloc(struct scsi_device *);
static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
static void qla2xxx_scan_start(struct Scsi_Host *);
static void qla2xxx_slave_destroy(struct scsi_device *); static void qla2xxx_slave_destroy(struct scsi_device *);
static int qla2x00_queuecommand(struct scsi_cmnd *cmd, static int qla2x00_queuecommand(struct scsi_cmnd *cmd,
void (*fn)(struct scsi_cmnd *)); void (*fn)(struct scsi_cmnd *));
...@@ -124,6 +126,8 @@ static struct scsi_host_template qla2x00_driver_template = { ...@@ -124,6 +126,8 @@ static struct scsi_host_template qla2x00_driver_template = {
.slave_alloc = qla2xxx_slave_alloc, .slave_alloc = qla2xxx_slave_alloc,
.slave_destroy = qla2xxx_slave_destroy, .slave_destroy = qla2xxx_slave_destroy,
.scan_finished = qla2xxx_scan_finished,
.scan_start = qla2xxx_scan_start,
.change_queue_depth = qla2x00_change_queue_depth, .change_queue_depth = qla2x00_change_queue_depth,
.change_queue_type = qla2x00_change_queue_type, .change_queue_type = qla2x00_change_queue_type,
.this_id = -1, .this_id = -1,
...@@ -287,7 +291,7 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str) ...@@ -287,7 +291,7 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
return str; return str;
} }
char * static char *
qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str) qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
{ {
char un_str[10]; char un_str[10];
...@@ -325,7 +329,7 @@ qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str) ...@@ -325,7 +329,7 @@ qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
return (str); return (str);
} }
char * static char *
qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str) qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
{ {
sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
...@@ -634,7 +638,7 @@ qla2x00_block_error_handler(struct scsi_cmnd *cmnd) ...@@ -634,7 +638,7 @@ qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
* Note: * Note:
* Only return FAILED if command not returned by firmware. * Only return FAILED if command not returned by firmware.
**************************************************************************/ **************************************************************************/
int static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd) qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{ {
scsi_qla_host_t *ha = to_qla_host(cmd->device->host); scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
...@@ -771,7 +775,7 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t) ...@@ -771,7 +775,7 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
* SUCCESS/FAILURE (defined as macro in scsi.h). * SUCCESS/FAILURE (defined as macro in scsi.h).
* *
**************************************************************************/ **************************************************************************/
int static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{ {
scsi_qla_host_t *ha = to_qla_host(cmd->device->host); scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
...@@ -902,7 +906,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha) ...@@ -902,7 +906,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha)
* SUCCESS/FAILURE (defined as macro in scsi.h). * SUCCESS/FAILURE (defined as macro in scsi.h).
* *
**************************************************************************/ **************************************************************************/
int static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{ {
scsi_qla_host_t *ha = to_qla_host(cmd->device->host); scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
...@@ -963,7 +967,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) ...@@ -963,7 +967,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
* *
* Note: * Note:
**************************************************************************/ **************************************************************************/
int static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{ {
scsi_qla_host_t *ha = to_qla_host(cmd->device->host); scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
...@@ -1366,6 +1370,29 @@ qla24xx_disable_intrs(scsi_qla_host_t *ha) ...@@ -1366,6 +1370,29 @@ qla24xx_disable_intrs(scsi_qla_host_t *ha)
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
} }
static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
set_bit(RSCN_UPDATE, &ha->dpc_flags);
}
static int
qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
if (!ha->host)
return 1;
if (time > ha->loop_reset_delay * HZ)
return 1;
return atomic_read(&ha->loop_state) == LOOP_READY;
}
/* /*
* PCI driver interface * PCI driver interface
*/ */
...@@ -1377,10 +1404,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -1377,10 +1404,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct Scsi_Host *host; struct Scsi_Host *host;
scsi_qla_host_t *ha; scsi_qla_host_t *ha;
unsigned long flags = 0; unsigned long flags = 0;
unsigned long wait_switch = 0;
char pci_info[20]; char pci_info[20];
char fw_str[30]; char fw_str[30];
fc_port_t *fcport;
struct scsi_host_template *sht; struct scsi_host_template *sht;
if (pci_enable_device(pdev)) if (pci_enable_device(pdev))
...@@ -1631,30 +1656,19 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -1631,30 +1656,19 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->isp_ops.enable_intrs(ha); ha->isp_ops.enable_intrs(ha);
/* v2.19.5b6 */
/*
* Wait around max loop_reset_delay secs for the devices to come
* on-line. We don't want Linux scanning before we are ready.
*
*/
for (wait_switch = jiffies + (ha->loop_reset_delay * HZ);
time_before(jiffies,wait_switch) &&
!(ha->device_flags & (DFLG_NO_CABLE | DFLG_FABRIC_DEVICES))
&& (ha->device_flags & SWITCH_FOUND) ;) {
qla2x00_check_fabric_devices(ha);
msleep(10);
}
pci_set_drvdata(pdev, ha); pci_set_drvdata(pdev, ha);
ha->flags.init_done = 1; ha->flags.init_done = 1;
ha->flags.online = 1;
num_hosts++; num_hosts++;
ret = scsi_add_host(host, &pdev->dev); ret = scsi_add_host(host, &pdev->dev);
if (ret) if (ret)
goto probe_failed; goto probe_failed;
scsi_scan_host(host);
qla2x00_alloc_sysfs_attr(ha); qla2x00_alloc_sysfs_attr(ha);
qla2x00_init_host_attr(ha); qla2x00_init_host_attr(ha);
...@@ -1669,10 +1683,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -1669,10 +1683,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no, ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no,
ha->isp_ops.fw_version_str(ha, fw_str)); ha->isp_ops.fw_version_str(ha, fw_str));
/* Go with fc_rport registration. */
list_for_each_entry(fcport, &ha->fcports, list)
qla2x00_reg_remote_port(ha, fcport);
return 0; return 0;
probe_failed: probe_failed:
......
...@@ -449,7 +449,7 @@ nvram_data_to_access_addr(uint32_t naddr) ...@@ -449,7 +449,7 @@ nvram_data_to_access_addr(uint32_t naddr)
return FARX_ACCESS_NVRAM_DATA | naddr; return FARX_ACCESS_NVRAM_DATA | naddr;
} }
uint32_t static uint32_t
qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr) qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
{ {
int rval; int rval;
...@@ -490,7 +490,7 @@ qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, ...@@ -490,7 +490,7 @@ qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
return dwptr; return dwptr;
} }
int static int
qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data) qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
{ {
int rval; int rval;
...@@ -512,7 +512,7 @@ qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data) ...@@ -512,7 +512,7 @@ qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
return rval; return rval;
} }
void static void
qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
uint8_t *flash_id) uint8_t *flash_id)
{ {
...@@ -537,7 +537,7 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, ...@@ -537,7 +537,7 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
} }
} }
int static int
qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
uint32_t dwords) uint32_t dwords)
{ {
......
...@@ -71,7 +71,7 @@ void __dump_registers(struct scsi_qla_host *ha) ...@@ -71,7 +71,7 @@ void __dump_registers(struct scsi_qla_host *ha)
readw(&ha->reg->u1.isp4010.nvram)); readw(&ha->reg->u1.isp4010.nvram));
} }
else if (is_qla4022(ha)) { else if (is_qla4022(ha) | is_qla4032(ha)) {
printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n", printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, (uint8_t) offsetof(struct isp_reg,
u1.isp4022.intr_mask), u1.isp4022.intr_mask),
...@@ -119,7 +119,7 @@ void __dump_registers(struct scsi_qla_host *ha) ...@@ -119,7 +119,7 @@ void __dump_registers(struct scsi_qla_host *ha)
readw(&ha->reg->u2.isp4010.port_err_status)); readw(&ha->reg->u2.isp4010.port_err_status));
} }
else if (is_qla4022(ha)) { else if (is_qla4022(ha) | is_qla4032(ha)) {
printk(KERN_INFO "Page 0 Registers:\n"); printk(KERN_INFO "Page 0 Registers:\n");
printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, (uint8_t) offsetof(struct isp_reg,
......
...@@ -40,7 +40,11 @@ ...@@ -40,7 +40,11 @@
#ifndef PCI_DEVICE_ID_QLOGIC_ISP4022 #ifndef PCI_DEVICE_ID_QLOGIC_ISP4022
#define PCI_DEVICE_ID_QLOGIC_ISP4022 0x4022 #define PCI_DEVICE_ID_QLOGIC_ISP4022 0x4022
#endif /* */ #endif
#ifndef PCI_DEVICE_ID_QLOGIC_ISP4032
#define PCI_DEVICE_ID_QLOGIC_ISP4032 0x4032
#endif
#define QLA_SUCCESS 0 #define QLA_SUCCESS 0
#define QLA_ERROR 1 #define QLA_ERROR 1
...@@ -277,7 +281,6 @@ struct scsi_qla_host { ...@@ -277,7 +281,6 @@ struct scsi_qla_host {
#define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */ #define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */ #define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
#define AF_LINK_UP 8 /* 0x00000100 */ #define AF_LINK_UP 8 /* 0x00000100 */
#define AF_TOPCAT_CHIP_PRESENT 9 /* 0x00000200 */
#define AF_IRQ_ATTACHED 10 /* 0x00000400 */ #define AF_IRQ_ATTACHED 10 /* 0x00000400 */
#define AF_ISNS_CMD_IN_PROCESS 12 /* 0x00001000 */ #define AF_ISNS_CMD_IN_PROCESS 12 /* 0x00001000 */
#define AF_ISNS_CMD_DONE 13 /* 0x00002000 */ #define AF_ISNS_CMD_DONE 13 /* 0x00002000 */
@@ -317,16 +320,17 @@ struct scsi_qla_host {
 	/* NVRAM registers */
 	struct eeprom_data *nvram;
 	spinlock_t hardware_lock ____cacheline_aligned;
-	spinlock_t list_lock;
 	uint32_t eeprom_cmd_data;
 	/* Counters for general statistics */
+	uint64_t isr_count;
 	uint64_t adapter_error_count;
 	uint64_t device_error_count;
 	uint64_t total_io_count;
 	uint64_t total_mbytes_xferred;
 	uint64_t link_failure_count;
 	uint64_t invalid_crc_count;
+	uint32_t bytes_xfered;
 	uint32_t spurious_int_count;
 	uint32_t aborted_io_count;
 	uint32_t io_timeout_count;
@@ -438,6 +442,11 @@ static inline int is_qla4022(struct scsi_qla_host *ha)
 	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022;
 }
+
+static inline int is_qla4032(struct scsi_qla_host *ha)
+{
+	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032;
+}
 static inline int adapter_up(struct scsi_qla_host *ha)
 {
 	return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
@@ -451,58 +460,58 @@ static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
 static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		&ha->reg->u1.isp4022.semaphore :
-		&ha->reg->u1.isp4010.nvram);
+	return (is_qla4010(ha) ?
+		&ha->reg->u1.isp4010.nvram :
+		&ha->reg->u1.isp4022.semaphore);
 }
 static inline void __iomem* isp_nvram(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		&ha->reg->u1.isp4022.nvram :
-		&ha->reg->u1.isp4010.nvram);
+	return (is_qla4010(ha) ?
+		&ha->reg->u1.isp4010.nvram :
+		&ha->reg->u1.isp4022.nvram);
 }
 static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		&ha->reg->u2.isp4022.p0.ext_hw_conf :
-		&ha->reg->u2.isp4010.ext_hw_conf);
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.ext_hw_conf :
+		&ha->reg->u2.isp4022.p0.ext_hw_conf);
 }
 static inline void __iomem* isp_port_status(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		&ha->reg->u2.isp4022.p0.port_status :
-		&ha->reg->u2.isp4010.port_status);
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.port_status :
+		&ha->reg->u2.isp4022.p0.port_status);
 }
 static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		&ha->reg->u2.isp4022.p0.port_ctrl :
-		&ha->reg->u2.isp4010.port_ctrl);
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.port_ctrl :
+		&ha->reg->u2.isp4022.p0.port_ctrl);
 }
 static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		&ha->reg->u2.isp4022.p0.port_err_status :
-		&ha->reg->u2.isp4010.port_err_status);
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.port_err_status :
+		&ha->reg->u2.isp4022.p0.port_err_status);
 }
 static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		&ha->reg->u2.isp4022.p0.gp_out :
-		&ha->reg->u2.isp4010.gp_out);
+	return (is_qla4010(ha) ?
+		&ha->reg->u2.isp4010.gp_out :
+		&ha->reg->u2.isp4022.p0.gp_out);
 }
 static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha)
 {
-	return (is_qla4022(ha) ?
-		offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2 :
-		offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2);
+	return (is_qla4010(ha) ?
+		offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2 :
+		offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2);
 }
 int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
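Editorial note: the accessors above hide the fact that the ISP4010 and the ISP4022/4032 place the same registers in different members of the u1/u2 unions; callers always get a single __iomem pointer. Inverting the test to is_qla4010() makes the 4010 the special case, so the newly added 4032 automatically resolves to the isp4022 layout. Below is a self-contained userspace model of the idea, with invented register layouts (the real ones live in the driver's isp_reg definition):

/* Hypothetical userspace model only; the register layouts here are invented. */
#include <stdint.h>
#include <stdio.h>

struct regs_4010 { uint32_t nvram; uint32_t ext_hw_conf; };
struct regs_4022 { uint32_t intr_mask; uint32_t ext_hw_conf; };

struct fake_ha {
	int is_4010;			/* 1 for ISP4010, 0 for ISP4022/4032 */
	union {
		struct regs_4010 isp4010;
		struct regs_4022 isp4022;
	} u;
};

/* Same shape as the driver's accessors: special-case the 4010, default to the 4022 map. */
static uint32_t *ext_hw_conf(struct fake_ha *ha)
{
	return ha->is_4010 ? &ha->u.isp4010.ext_hw_conf
			   : &ha->u.isp4022.ext_hw_conf;
}

int main(void)
{
	struct fake_ha ha = { .is_4010 = 0 };	/* model an ISP4022/4032 */

	ha.u.isp4022.ext_hw_conf = 0x1234;
	printf("ext_hw_conf = 0x%x\n", *ext_hw_conf(&ha));
	return 0;
}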
@@ -511,59 +520,59 @@ int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
 static inline int ql4xxx_lock_flash(struct scsi_qla_host *a)
 {
-	if (is_qla4022(a))
+	if (is_qla4010(a))
+		return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
+					   QL4010_FLASH_SEM_BITS);
+	else
 		return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK,
 					   (QL4022_RESOURCE_BITS_BASE_CODE |
 					    (a->mac_index)) << 13);
-	else
-		return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
-					   QL4010_FLASH_SEM_BITS);
 }
 static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a)
 {
-	if (is_qla4022(a))
-		ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
-	else
+	if (is_qla4010(a))
 		ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK);
+	else
+		ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
 }
 static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a)
 {
-	if (is_qla4022(a))
+	if (is_qla4010(a))
+		return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
+					   QL4010_NVRAM_SEM_BITS);
+	else
 		return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK,
 					   (QL4022_RESOURCE_BITS_BASE_CODE |
 					    (a->mac_index)) << 10);
-	else
-		return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
-					   QL4010_NVRAM_SEM_BITS);
 }
 static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a)
 {
-	if (is_qla4022(a))
-		ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
-	else
+	if (is_qla4010(a))
 		ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK);
+	else
+		ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
 }
 static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a)
 {
-	if (is_qla4022(a))
+	if (is_qla4010(a))
+		return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
+				       QL4010_DRVR_SEM_BITS);
+	else
 		return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK,
 				       (QL4022_RESOURCE_BITS_BASE_CODE |
 					(a->mac_index)) << 1);
-	else
-		return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
-				       QL4010_DRVR_SEM_BITS);
 }
 static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
 {
-	if (is_qla4022(a))
-		ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
-	else
+	if (is_qla4010(a))
 		ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK);
+	else
+		ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
 }
 /*---------------------------------------------------------------------------*/
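Editorial note: the same inversion applies to the flash/NVRAM/driver semaphore wrappers above. With the old is_qla4022() test, an ISP4032 would have fallen through to the ISP4010 semaphore scheme; testing is_qla4010() instead lets the 4032 share the 4022 path without touching each wrapper again. A small, hypothetical before/after check (chip IDs only; the semaphore calls themselves are not modelled):

/* Hypothetical illustration only: which semaphore scheme an ISP4032 selects. */
#include <stdio.h>

#define ISP4010	0x4010
#define ISP4022	0x4022
#define ISP4032	0x4032

static const char *old_choice(unsigned int dev)
{
	/* old test: everything that is not a 4022 used the 4010 scheme */
	return (dev == ISP4022) ? "QL4022 scheme" : "QL4010 scheme";
}

static const char *new_choice(unsigned int dev)
{
	/* new test: only the 4010 is special; 4022 and 4032 share a path */
	return (dev == ISP4010) ? "QL4010 scheme" : "QL4022 scheme";
}

int main(void)
{
	printf("ISP4032, old test: %s\n", old_choice(ISP4032));	/* QL4010 scheme */
	printf("ISP4032, new test: %s\n", new_choice(ISP4032));	/* QL4022 scheme */
	return 0;
}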
......
@@ -296,7 +296,6 @@ static inline uint32_t clr_rmask(uint32_t val)
 /* ISP Semaphore definitions */
 /* ISP General Purpose Output definitions */
-#define GPOR_TOPCAT_RESET			0x00000004
 /* shadow registers (DMA'd from HA to system memory. read only) */
 struct shadow_regs {
@@ -339,10 +338,13 @@ union external_hw_config_reg {
 /* Mailbox command definitions */
 #define MBOX_CMD_ABOUT_FW			0x0009
 #define MBOX_CMD_LUN_RESET			0x0016
+#define MBOX_CMD_GET_MANAGEMENT_DATA		0x001E
 #define MBOX_CMD_GET_FW_STATUS			0x001F
 #define MBOX_CMD_SET_ISNS_SERVICE		0x0021
 #define ISNS_DISABLE				0
 #define ISNS_ENABLE				1
+#define MBOX_CMD_COPY_FLASH			0x0024
+#define MBOX_CMD_WRITE_FLASH			0x0025
 #define MBOX_CMD_READ_FLASH			0x0026
 #define MBOX_CMD_CLEAR_DATABASE_ENTRY		0x0031
 #define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT		0x0056
@@ -360,10 +362,13 @@ union external_hw_config_reg {
 #define DDB_DS_SESSION_FAILED			0x06
 #define DDB_DS_LOGIN_IN_PROCESS			0x07
 #define MBOX_CMD_GET_FW_STATE			0x0069
+#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS	0x006A
+#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS	0x0087
 /* Mailbox 1 */
 #define FW_STATE_READY				0x0000
 #define FW_STATE_CONFIG_WAIT			0x0001
+#define FW_STATE_WAIT_LOGIN			0x0002
 #define FW_STATE_ERROR				0x0004
 #define FW_STATE_DHCP_IN_PROGRESS		0x0008
......
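Editorial note: the MBOX_CMD_* opcodes above are placed in mailbox register 0 (with any parameters in the following registers) and the firmware's answer comes back in the status mailboxes; the FW_STATE_* values are reported in mailbox 1 in response to MBOX_CMD_GET_FW_STATE. A hedged, self-contained sketch of that flow; issue_mbox() is invented for illustration and is not the driver's mailbox routine:

/* Hypothetical illustration only -- issue_mbox() is not a qla4xxx function. */
#include <stdint.h>
#include <stdio.h>

#define MBOX_CMD_GET_FW_STATE	0x0069
#define FW_STATE_READY		0x0000

#define MBOX_REG_COUNT		8

/* Pretend firmware: answers GET_FW_STATE with a READY state in mailbox 1. */
static int issue_mbox(const uint32_t *mbox_cmd, uint32_t *mbox_sts)
{
	if (mbox_cmd[0] == MBOX_CMD_GET_FW_STATE) {
		mbox_sts[1] = FW_STATE_READY;
		return 0;
	}
	return -1;
}

int main(void)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT] = { MBOX_CMD_GET_FW_STATE };
	uint32_t mbox_sts[MBOX_REG_COUNT] = { 0 };

	if (issue_mbox(mbox_cmd, mbox_sts) == 0 && mbox_sts[1] == FW_STATE_READY)
		printf("firmware reports READY\n");
	return 0;
}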
@@ -8,6 +8,7 @@
 #ifndef __QLA4x_GBL_H
 #define __QLA4x_GBL_H
+int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
 int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
 int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
 int qla4xxx_initialize_adapter(struct scsi_qla_host * ha,
@@ -75,4 +76,4 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
 extern int ql4xextended_error_logging;
 extern int ql4xdiscoverywait;
 extern int ql4xdontresethba;
 #endif /* _QLA4x_GBL_H */
(This file's diff has been collapsed.)
@@ -38,7 +38,7 @@ qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index)
 static inline void
 __qla4xxx_enable_intrs(struct scsi_qla_host *ha)
 {
-	if (is_qla4022(ha)) {
+	if (is_qla4022(ha) | is_qla4032(ha)) {
 		writel(set_rmask(IMR_SCSI_INTR_ENABLE),
 		       &ha->reg->u1.isp4022.intr_mask);
 		readl(&ha->reg->u1.isp4022.intr_mask);
@@ -52,7 +52,7 @@ __qla4xxx_enable_intrs(struct scsi_qla_host *ha)
 static inline void
 __qla4xxx_disable_intrs(struct scsi_qla_host *ha)
 {
-	if (is_qla4022(ha)) {
+	if (is_qla4022(ha) | is_qla4032(ha)) {
 		writel(clr_rmask(IMR_SCSI_INTR_ENABLE),
 		       &ha->reg->u1.isp4022.intr_mask);
 		readl(&ha->reg->u1.isp4022.intr_mask);
......
@@ -294,6 +294,12 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
 			cmd_entry->control_flags = CF_WRITE;
 		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
 			cmd_entry->control_flags = CF_READ;
+
+		ha->bytes_xfered += cmd->request_bufflen;
+		if (ha->bytes_xfered & ~0xFFFFF) {
+			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
+			ha->bytes_xfered &= 0xFFFFF;
+		}
 	}
 	/* Set tagged queueing control flags */
......
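Editorial note: the statistics code added above accumulates each command's request_bufflen in ha->bytes_xfered; whenever any bit above 0xFFFFF becomes set (i.e. at least 1 MiB has accumulated), the whole mebibytes are moved into ha->total_mbytes_xferred and only the sub-MiB remainder is kept. A standalone sketch of the same arithmetic with made-up transfer sizes:

/* Standalone sketch of the MiB roll-over behind total_mbytes_xferred. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total_mbytes_xferred = 0;
	uint32_t bytes_xfered = 0;
	uint32_t lens[] = { 512 * 1024, 768 * 1024, 4096 };	/* sample I/O sizes */
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		bytes_xfered += lens[i];
		if (bytes_xfered & ~0xFFFFF) {			/* >= 1 MiB pending */
			total_mbytes_xferred += bytes_xfered >> 20;
			bytes_xfered &= 0xFFFFF;		/* keep the remainder */
		}
	}
	printf("%llu MiB + %u bytes\n",
	       (unsigned long long)total_mbytes_xferred, bytes_xfered);
	return 0;
}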
@@ -627,6 +627,7 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->isr_count++;
 	/*
 	 * Repeatedly service interrupts up to a maximum of
 	 * MAX_REQS_SERVICED_PER_INTR
......
(Diffs for 25 more files in this merge have been collapsed.)