Commit 849a8924 authored by: James Bottomley

Merge by Hand

Conflicts in dec_esp.c (Thanks Bacchus), scsi_transport_iscsi.c and
scsi_transport_fc.h
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Copyright (c) 2003-2005 QLogic Corporation
QLogic Linux Fibre Channel HBA Driver
This program includes a device driver for Linux 2.6 that may be
distributed with QLogic hardware specific firmware binary file.
You may modify and redistribute the device driver code under the
GNU General Public License as published by the Free Software
Foundation (version 2 or a later version).
You may redistribute the hardware specific firmware binary file
under the following terms:
1. Redistribution of source code (only if applicable),
must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistribution in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
3. The name of QLogic Corporation may not be used to
endorse or promote products derived from this software
without specific prior written permission
REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
COMBINATION WITH THIS PROGRAM.
......@@ -77,8 +77,8 @@
#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
#endif
#define MPT_LINUX_VERSION_COMMON "3.03.03"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.03"
#define MPT_LINUX_VERSION_COMMON "3.03.04"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.04"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
......@@ -421,6 +421,17 @@ typedef struct _MPT_IOCTL {
struct semaphore sem_ioc;
} MPT_IOCTL;
#define MPT_SAS_MGMT_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */
#define MPT_SAS_MGMT_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */
#define MPT_SAS_MGMT_STATUS_TM_FAILED 0x40 /* User TM request failed */
typedef struct _MPT_SAS_MGMT {
struct semaphore mutex;
struct completion done;
u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */
u8 status; /* current command status */
}MPT_SAS_MGMT;
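MPT_SAS_MGMT tracks a single in-flight firmware management request per adapter: the semaphore serializes callers, the completion is signalled from the reply handler, and status/reply are filled in from the reply frame. A minimal sketch of the calling pattern (illustrative only; the real user is mptsas_phy_reset() further down in this patch):

    if (down_interruptible(&ioc->sas_mgmt.mutex))
        return -ERESTARTSYS;                    /* serialize management requests */
    /* ... build and post a message frame here ... */
    if (!wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ)) {
        /* timed out: the callback never ran, recover the adapter */
    } else if (ioc->sas_mgmt.status & MPT_SAS_MGMT_STATUS_RF_VALID) {
        /* the reply frame is available in ioc->sas_mgmt.reply */
    }
    up(&ioc->sas_mgmt.mutex);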
/*
* Event Structure and define
*/
......@@ -604,6 +615,7 @@ typedef struct _MPT_ADAPTER
struct list_head list;
struct net_device *netdev;
struct list_head sas_topology;
MPT_SAS_MGMT sas_mgmt;
} MPT_ADAPTER;
/*
......
......@@ -83,6 +83,7 @@ MODULE_PARM_DESC(mpt_pt_clear,
static int mptsasDoneCtx = -1;
static int mptsasTaskCtx = -1;
static int mptsasInternalCtx = -1; /* Used only for internal commands */
static int mptsasMgmtCtx = -1;
/*
......@@ -123,6 +124,104 @@ struct mptsas_portinfo {
struct mptsas_phyinfo *phy_info;
};
#ifdef SASDEBUG
static void mptsas_print_phy_data(MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
{
printk("---- IO UNIT PAGE 0 ------------\n");
printk("Handle=0x%X\n",
le16_to_cpu(phy_data->AttachedDeviceHandle));
printk("Controller Handle=0x%X\n",
le16_to_cpu(phy_data->ControllerDevHandle));
printk("Port=0x%X\n", phy_data->Port);
printk("Port Flags=0x%X\n", phy_data->PortFlags);
printk("PHY Flags=0x%X\n", phy_data->PhyFlags);
printk("Negotiated Link Rate=0x%X\n", phy_data->NegotiatedLinkRate);
printk("Controller PHY Device Info=0x%X\n",
le32_to_cpu(phy_data->ControllerPhyDeviceInfo));
printk("DiscoveryStatus=0x%X\n",
le32_to_cpu(phy_data->DiscoveryStatus));
printk("\n");
}
static void mptsas_print_phy_pg0(SasPhyPage0_t *pg0)
{
__le64 sas_address;
memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
printk("---- SAS PHY PAGE 0 ------------\n");
printk("Attached Device Handle=0x%X\n",
le16_to_cpu(pg0->AttachedDevHandle));
printk("SAS Address=0x%llX\n",
(unsigned long long)le64_to_cpu(sas_address));
printk("Attached PHY Identifier=0x%X\n", pg0->AttachedPhyIdentifier);
printk("Attached Device Info=0x%X\n",
le32_to_cpu(pg0->AttachedDeviceInfo));
printk("Programmed Link Rate=0x%X\n", pg0->ProgrammedLinkRate);
printk("Change Count=0x%X\n", pg0->ChangeCount);
printk("PHY Info=0x%X\n", le32_to_cpu(pg0->PhyInfo));
printk("\n");
}
static void mptsas_print_phy_pg1(SasPhyPage1_t *pg1)
{
printk("---- SAS PHY PAGE 1 ------------\n");
printk("Invalid Dword Count=0x%x\n", pg1->InvalidDwordCount);
printk("Running Disparity Error Count=0x%x\n",
pg1->RunningDisparityErrorCount);
printk("Loss Dword Synch Count=0x%x\n", pg1->LossDwordSynchCount);
printk("PHY Reset Problem Count=0x%x\n", pg1->PhyResetProblemCount);
printk("\n");
}
static void mptsas_print_device_pg0(SasDevicePage0_t *pg0)
{
__le64 sas_address;
memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
printk("---- SAS DEVICE PAGE 0 ---------\n");
printk("Handle=0x%X\n" ,le16_to_cpu(pg0->DevHandle));
printk("Enclosure Handle=0x%X\n", le16_to_cpu(pg0->EnclosureHandle));
printk("Slot=0x%X\n", le16_to_cpu(pg0->Slot));
printk("SAS Address=0x%llX\n", le64_to_cpu(sas_address));
printk("Target ID=0x%X\n", pg0->TargetID);
printk("Bus=0x%X\n", pg0->Bus);
/* The PhyNum field specifies the PHY number of the parent
* device this device is linked to
*/
printk("Parent Phy Num=0x%X\n", pg0->PhyNum);
printk("Access Status=0x%X\n", le16_to_cpu(pg0->AccessStatus));
printk("Device Info=0x%X\n", le32_to_cpu(pg0->DeviceInfo));
printk("Flags=0x%X\n", le16_to_cpu(pg0->Flags));
printk("Physical Port=0x%X\n", pg0->PhysicalPort);
printk("\n");
}
static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1)
{
printk("---- SAS EXPANDER PAGE 1 ------------\n");
printk("Physical Port=0x%X\n", pg1->PhysicalPort);
printk("PHY Identifier=0x%X\n", pg1->PhyIdentifier);
printk("Negotiated Link Rate=0x%X\n", pg1->NegotiatedLinkRate);
printk("Programmed Link Rate=0x%X\n", pg1->ProgrammedLinkRate);
printk("Hardware Link Rate=0x%X\n", pg1->HwLinkRate);
printk("Owner Device Handle=0x%X\n",
le16_to_cpu(pg1->OwnerDevHandle));
printk("Attached Device Handle=0x%X\n",
le16_to_cpu(pg1->AttachedDevHandle));
}
#else
#define mptsas_print_phy_data(phy_data) do { } while (0)
#define mptsas_print_phy_pg0(pg0) do { } while (0)
#define mptsas_print_phy_pg1(pg1) do { } while (0)
#define mptsas_print_device_pg0(pg0) do { } while (0)
#define mptsas_print_expander_pg1(pg1) do { } while (0)
#endif
/*
* This is pretty ugly. We will be able to seriously clean it up
* once the DV code in mptscsih goes away and we can properly
......@@ -200,91 +299,159 @@ static struct scsi_host_template mptsas_driver_template = {
.use_clustering = ENABLE_CLUSTERING,
};
static struct sas_function_template mptsas_transport_functions = {
};
static struct scsi_transport_template *mptsas_transport_template;
#ifdef SASDEBUG
static void mptsas_print_phy_data(MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
{
printk("---- IO UNIT PAGE 0 ------------\n");
printk("Handle=0x%X\n",
le16_to_cpu(phy_data->AttachedDeviceHandle));
printk("Controller Handle=0x%X\n",
le16_to_cpu(phy_data->ControllerDevHandle));
printk("Port=0x%X\n", phy_data->Port);
printk("Port Flags=0x%X\n", phy_data->PortFlags);
printk("PHY Flags=0x%X\n", phy_data->PhyFlags);
printk("Negotiated Link Rate=0x%X\n", phy_data->NegotiatedLinkRate);
printk("Controller PHY Device Info=0x%X\n",
le32_to_cpu(phy_data->ControllerPhyDeviceInfo));
printk("DiscoveryStatus=0x%X\n",
le32_to_cpu(phy_data->DiscoveryStatus));
printk("\n");
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
}
static void mptsas_print_phy_pg0(SasPhyPage0_t *pg0)
static int mptsas_get_linkerrors(struct sas_phy *phy)
{
__le64 sas_address;
MPT_ADAPTER *ioc = phy_to_ioc(phy);
ConfigExtendedPageHeader_t hdr;
CONFIGPARMS cfg;
SasPhyPage1_t *buffer;
dma_addr_t dma_handle;
int error;
memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
hdr.PageVersion = MPI_SASPHY1_PAGEVERSION;
hdr.ExtPageLength = 0;
hdr.PageNumber = 1 /* page number 1*/;
hdr.Reserved1 = 0;
hdr.Reserved2 = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY;
printk("---- SAS PHY PAGE 0 ------------\n");
printk("Attached Device Handle=0x%X\n",
le16_to_cpu(pg0->AttachedDevHandle));
printk("SAS Address=0x%llX\n",
(unsigned long long)le64_to_cpu(sas_address));
printk("Attached PHY Identifier=0x%X\n", pg0->AttachedPhyIdentifier);
printk("Attached Device Info=0x%X\n",
le32_to_cpu(pg0->AttachedDeviceInfo));
printk("Programmed Link Rate=0x%X\n", pg0->ProgrammedLinkRate);
printk("Change Count=0x%X\n", pg0->ChangeCount);
printk("PHY Info=0x%X\n", le32_to_cpu(pg0->PhyInfo));
printk("\n");
}
cfg.cfghdr.ehdr = &hdr;
cfg.physAddr = -1;
cfg.pageAddr = phy->identify.phy_identifier;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; /* read */
cfg.timeout = 10;
static void mptsas_print_device_pg0(SasDevicePage0_t *pg0)
{
__le64 sas_address;
error = mpt_config(ioc, &cfg);
if (error)
return error;
if (!hdr.ExtPageLength)
return -ENXIO;
memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
&dma_handle);
if (!buffer)
return -ENOMEM;
printk("---- SAS DEVICE PAGE 0 ---------\n");
printk("Handle=0x%X\n" ,le16_to_cpu(pg0->DevHandle));
printk("Enclosure Handle=0x%X\n", le16_to_cpu(pg0->EnclosureHandle));
printk("Slot=0x%X\n", le16_to_cpu(pg0->Slot));
printk("SAS Address=0x%llX\n", le64_to_cpu(sas_address));
printk("Target ID=0x%X\n", pg0->TargetID);
printk("Bus=0x%X\n", pg0->Bus);
printk("Parent Phy Num=0x%X\n", pg0->PhyNum);
printk("Access Status=0x%X\n", le16_to_cpu(pg0->AccessStatus));
printk("Device Info=0x%X\n", le32_to_cpu(pg0->DeviceInfo));
printk("Flags=0x%X\n", le16_to_cpu(pg0->Flags));
printk("Physical Port=0x%X\n", pg0->PhysicalPort);
printk("\n");
cfg.physAddr = dma_handle;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
error = mpt_config(ioc, &cfg);
if (error)
goto out_free_consistent;
mptsas_print_phy_pg1(buffer);
phy->invalid_dword_count = le32_to_cpu(buffer->InvalidDwordCount);
phy->running_disparity_error_count =
le32_to_cpu(buffer->RunningDisparityErrorCount);
phy->loss_of_dword_sync_count =
le32_to_cpu(buffer->LossDwordSynchCount);
phy->phy_reset_problem_count =
le32_to_cpu(buffer->PhyResetProblemCount);
out_free_consistent:
pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
buffer, dma_handle);
return error;
}
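mptsas_get_linkerrors() follows the usual two-step MPT config-page access: a MPI_CONFIG_ACTION_PAGE_HEADER request fills in hdr.ExtPageLength, then a DMA-coherent buffer of that many dwords is allocated and read back with MPI_CONFIG_ACTION_PAGE_READ_CURRENT. Condensed, the pattern is (error paths omitted):

    cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
    mpt_config(ioc, &cfg);                          /* learn the page length */
    buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4, &dma_handle);
    cfg.physAddr = dma_handle;
    cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
    mpt_config(ioc, &cfg);                          /* fetch the page contents */
    /* ... copy the interesting fields out of 'buffer' ... */
    pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, buffer, dma_handle);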
static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1)
static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
MPT_FRAME_HDR *reply)
{
printk("---- SAS EXPANDER PAGE 1 ------------\n");
ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_COMMAND_GOOD;
if (reply != NULL) {
ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_RF_VALID;
memcpy(ioc->sas_mgmt.reply, reply,
min(ioc->reply_sz, 4 * reply->u.reply.MsgLength));
}
complete(&ioc->sas_mgmt.done);
return 1;
}
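mptsas_mgmt_done() is the reply-path callback: it latches the reply frame when one is supplied, sets the MPT_SAS_MGMT_STATUS_* bits declared in mptbase.h above, and completes the waiter. It is wired up through the new mptsasMgmtCtx context later in this patch:

    mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER);  /* in mptsas_init() */
    ...
    mpt_deregister(mptsasMgmtCtx);                                  /* in mptsas_exit() */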
printk("Physical Port=0x%X\n", pg1->PhysicalPort);
printk("PHY Identifier=0x%X\n", pg1->PhyIdentifier);
printk("Negotiated Link Rate=0x%X\n", pg1->NegotiatedLinkRate);
printk("Programmed Link Rate=0x%X\n", pg1->ProgrammedLinkRate);
printk("Hardware Link Rate=0x%X\n", pg1->HwLinkRate);
printk("Owner Device Handle=0x%X\n",
le16_to_cpu(pg1->OwnerDevHandle));
printk("Attached Device Handle=0x%X\n",
le16_to_cpu(pg1->AttachedDevHandle));
static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
{
MPT_ADAPTER *ioc = phy_to_ioc(phy);
SasIoUnitControlRequest_t *req;
SasIoUnitControlReply_t *reply;
MPT_FRAME_HDR *mf;
MPIHeader_t *hdr;
unsigned long timeleft;
int error = -ERESTARTSYS;
/* not implemented for expanders */
if (phy->identify.target_port_protocols & SAS_PROTOCOL_SMP)
return -ENXIO;
if (down_interruptible(&ioc->sas_mgmt.mutex))
goto out;
mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
if (!mf) {
error = -ENOMEM;
goto out_unlock;
}
hdr = (MPIHeader_t *) mf;
req = (SasIoUnitControlRequest_t *)mf;
memset(req, 0, sizeof(SasIoUnitControlRequest_t));
req->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
req->MsgContext = hdr->MsgContext;
req->Operation = hard_reset ?
MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET;
req->PhyNum = phy->identify.phy_identifier;
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
10 * HZ);
if (!timeleft) {
/* On timeout reset the board */
mpt_free_msg_frame(ioc, mf);
mpt_HardResetHandler(ioc, CAN_SLEEP);
error = -ETIMEDOUT;
goto out_unlock;
}
/* a reply frame is expected */
if ((ioc->sas_mgmt.status &
MPT_IOCTL_STATUS_RF_VALID) == 0) {
error = -ENXIO;
goto out_unlock;
}
/* process the completed Reply Message Frame */
reply = (SasIoUnitControlReply_t *)ioc->sas_mgmt.reply;
if (reply->IOCStatus != MPI_IOCSTATUS_SUCCESS) {
printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
__FUNCTION__,
reply->IOCStatus,
reply->IOCLogInfo);
error = -ENXIO;
goto out_unlock;
}
error = 0;
out_unlock:
up(&ioc->sas_mgmt.mutex);
out:
return error;
}
#else
#define mptsas_print_phy_data(phy_data) do { } while (0)
#define mptsas_print_phy_pg0(pg0) do { } while (0)
#define mptsas_print_device_pg0(pg0) do { } while (0)
#define mptsas_print_expander_pg1(pg1) do { } while (0)
#endif
static struct sas_function_template mptsas_transport_functions = {
.get_linkerrors = mptsas_get_linkerrors,
.phy_reset = mptsas_phy_reset,
};
static struct scsi_transport_template *mptsas_transport_template;
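The sas_function_template lists only the operations this driver implements; the SAS transport class exposes them as phy attributes in sysfs. The template is turned into a scsi_transport_template in the driver's init path, roughly as follows (a sketch; the exact call sites are outside the quoted hunks):

    mptsas_transport_template =
        sas_attach_transport(&mptsas_transport_functions);
    if (!mptsas_transport_template)
        return -ENODEV;
    ...
    sas_release_transport(mptsas_transport_template);   /* on unload */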
static int
mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
......@@ -680,7 +847,7 @@ mptsas_parse_device_info(struct sas_identify *identify,
}
static int mptsas_probe_one_phy(struct device *dev,
struct mptsas_phyinfo *phy_info, int index)
struct mptsas_phyinfo *phy_info, int index, int local)
{
struct sas_phy *port;
int error;
......@@ -773,6 +940,9 @@ static int mptsas_probe_one_phy(struct device *dev,
break;
}
if (local)
port->local_attached = 1;
error = sas_phy_add(port);
if (error) {
sas_phy_free(port);
......@@ -838,7 +1008,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc, int *index)
}
mptsas_probe_one_phy(&ioc->sh->shost_gendev,
&port_info->phy_info[i], *index);
&port_info->phy_info[i], *index, 1);
(*index)++;
}
......@@ -909,7 +1079,8 @@ mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle, int *index)
}
}
mptsas_probe_one_phy(parent, &port_info->phy_info[i], *index);
mptsas_probe_one_phy(parent, &port_info->phy_info[i],
*index, 0);
(*index)++;
}
......@@ -1021,6 +1192,8 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sh->unique_id = ioc->id;
INIT_LIST_HEAD(&ioc->sas_topology);
init_MUTEX(&ioc->sas_mgmt.mutex);
init_completion(&ioc->sas_mgmt.done);
/* Verify that we won't exceed the maximum
* number of chain buffers
......@@ -1207,6 +1380,7 @@ mptsas_init(void)
mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER);
mptsasInternalCtx =
mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER);
mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER);
if (mpt_event_register(mptsasDoneCtx, mptscsih_event_process) == 0) {
devtprintk((KERN_INFO MYNAM
......@@ -1230,6 +1404,7 @@ mptsas_exit(void)
mpt_reset_deregister(mptsasDoneCtx);
mpt_event_deregister(mptsasDoneCtx);
mpt_deregister(mptsasMgmtCtx);
mpt_deregister(mptsasInternalCtx);
mpt_deregister(mptsasTaskCtx);
mpt_deregister(mptsasDoneCtx);
......
......@@ -1732,7 +1732,9 @@ static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
tw_dev->num_resets++;
printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Unit #%d: Command (0x%x) timed out, resetting card.\n", tw_dev->host->host_no, TW_DRIVER, 0x2c, SCpnt->device->id, SCpnt->cmnd[0]);
sdev_printk(KERN_WARNING, SCpnt->device,
"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
/* Now reset the card and some of the device extension data */
if (twa_reset_device_extension(tw_dev, 0)) {
......
......@@ -1432,7 +1432,9 @@ static int tw_scsi_eh_reset(struct scsi_cmnd *SCpnt)
tw_dev->num_resets++;
printk(KERN_WARNING "3w-xxxx: scsi%d: WARNING: Unit #%d: Command (0x%x) timed out, resetting card.\n", tw_dev->host->host_no, SCpnt->device->id, SCpnt->cmnd[0]);
sdev_printk(KERN_WARNING, SCpnt->device,
"WARNING: Command (0x%x) timed out, resetting card.\n",
SCpnt->cmnd[0]);
/* Now reset the card and some of the device extension data */
if (tw_reset_device_extension(tw_dev, 0)) {
......
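Both 3ware drivers drop the hand-rolled "scsi%d ... Unit #%d" prefixes in favour of sdev_printk(), which stamps the message with the SCSI device's own name, so host/target/LUN no longer have to be formatted by hand. The same helpers are used throughout the rest of this patch, e.g.:

    sdev_printk(KERN_WARNING, SCpnt->device,
                "Command (0x%x) timed out\n", SCpnt->cmnd[0]);
    scmd_printk(KERN_INFO, SCpnt, "retrying command\n");   /* same idea, keyed off a scsi_cmnd */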
......@@ -128,6 +128,7 @@
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <asm/dma.h>
#include <asm/system.h>
#include <asm/io.h>
......@@ -831,8 +832,8 @@ process_extended_message(struct Scsi_Host *host,
} else {
/* SDTR message out of the blue, reject it */
printk(KERN_WARNING "scsi%d Unexpected SDTR msg\n",
host->host_no);
shost_printk(KERN_WARNING, host,
"Unexpected SDTR msg\n");
hostdata->msgout[0] = A_REJECT_MSG;
dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
script_patch_16(hostdata->script, MessageCount, 1);
......@@ -906,15 +907,17 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
} else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
/* rejected our first simple tag message */
printk(KERN_WARNING "scsi%d (%d:%d) Rejected first tag queue attempt, turning off tag queueing\n", host->host_no, pun, lun);
scmd_printk(KERN_WARNING, SCp,
"Rejected first tag queue attempt, turning off tag queueing\n");
/* we're done negotiating */
NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
hostdata->tag_negotiated &= ~(1<<SCp->device->id);
hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
SCp->device->tagged_supported = 0;
scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
} else {
printk(KERN_WARNING "scsi%d (%d:%d) Unexpected REJECT Message %s\n",
host->host_no, pun, lun,
shost_printk(KERN_WARNING, host,
"(%d:%d) Unexpected REJECT Message %s\n",
pun, lun,
NCR_700_phase[(dsps & 0xf00) >> 8]);
/* however, just ignore it */
}
......@@ -983,7 +986,8 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
if(SCp->cmnd[0] == REQUEST_SENSE) {
/* OOPS: bad device, returning another
* contingent allegiance condition */
printk(KERN_ERR "scsi%d (%d:%d) broken device is looping in contingent allegiance: ignoring\n", host->host_no, pun, lun);
scmd_printk(KERN_ERR, SCp,
"broken device is looping in contingent allegiance: ignoring\n");
NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
} else {
#ifdef NCR_DEBUG
......@@ -1047,12 +1051,13 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
// SCp->request_bufflen,
// DMA_FROM_DEVICE);
// if(((char *)SCp->request_buffer)[7] & 0x02) {
// printk(KERN_INFO "scsi%d: (%d:%d) Enabling Tag Command Queuing\n", host->host_no, pun, lun);
// hostdata->tag_negotiated |= (1<<SCp->device->id);
// scmd_printk(KERN_INFO, SCp,
// "Enabling Tag Command Queuing\n");
// hostdata->tag_negotiated |= (1<<scmd_id(SCp));
// NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
// } else {
// NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
// hostdata->tag_negotiated &= ~(1<<SCp->device->id);
// hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
// }
//}
NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
......@@ -1060,11 +1065,11 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
} else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
__u8 i = (dsps & 0xf00) >> 8;
printk(KERN_ERR "scsi%d: (%d:%d), UNEXPECTED PHASE %s (%s)\n",
host->host_no, pun, lun,
scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
NCR_700_phase[i],
sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
printk(KERN_ERR " len = %d, cmd =", SCp->cmd_len);
scmd_printk(KERN_ERR, SCp, " len = %d, cmd =",
SCp->cmd_len);
scsi_print_command(SCp);
NCR_700_internal_bus_reset(host);
......@@ -1115,14 +1120,14 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
}
slot = (struct NCR_700_command_slot *)SCp->host_scribble;
DEBUG(("53c700: %d:%d:%d, reselection is tag %d, slot %p(%d)\n",
host->host_no, SDp->id, SDp->lun,
hostdata->msgin[2], slot, slot->tag));
DDEBUG(KERN_DEBUG, SDp,
"reselection is tag %d, slot %p(%d)\n",
hostdata->msgin[2], slot, slot->tag);
} else {
struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
if(unlikely(SCp == NULL)) {
printk(KERN_ERR "scsi%d: (%d:%d) no saved request for untagged cmd\n",
host->host_no, reselection_id, lun);
sdev_printk(KERN_ERR, SDp,
"no saved request for untagged cmd\n");
BUG();
}
slot = (struct NCR_700_command_slot *)SCp->host_scribble;
......@@ -1422,7 +1427,7 @@ NCR_700_start_command(struct scsi_cmnd *SCp)
* If a contingent allegiance condition exists, the device
* will refuse all tags, so send the request sense as untagged
* */
if((hostdata->tag_negotiated & (1<<SCp->device->id))
if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
&& (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE)) {
count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
}
......@@ -1441,7 +1446,7 @@ NCR_700_start_command(struct scsi_cmnd *SCp)
script_patch_ID(hostdata->script,
Device_ID, 1<<SCp->device->id);
Device_ID, 1<<scmd_id(SCp));
script_patch_32_abs(hostdata->script, CommandAddress,
slot->pCmd);
......@@ -1764,17 +1769,15 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
* - The blk layer sent and untagged command
*/
if(NCR_700_get_depth(SCp->device) != 0
&& (!(hostdata->tag_negotiated & (1<<SCp->device->id))
&& (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
|| !blk_rq_tagged(SCp->request))) {
DEBUG((KERN_ERR "scsi%d (%d:%d) has non zero depth %d\n",
SCp->device->host->host_no, SCp->device->id, SCp->device->lun,
NCR_700_get_depth(SCp->device)));
CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
NCR_700_get_depth(SCp->device));
return SCSI_MLQUEUE_DEVICE_BUSY;
}
if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
DEBUG((KERN_ERR "scsi%d (%d:%d) has max tag depth %d\n",
SCp->device->host->host_no, SCp->device->id, SCp->device->lun,
NCR_700_get_depth(SCp->device)));
CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
NCR_700_get_depth(SCp->device));
return SCSI_MLQUEUE_DEVICE_BUSY;
}
NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
......@@ -1796,10 +1799,10 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
scsi_print_command(SCp);
#endif
if(blk_rq_tagged(SCp->request)
&& (hostdata->tag_negotiated &(1<<SCp->device->id)) == 0
&& (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
&& NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
printk(KERN_ERR "scsi%d: (%d:%d) Enabling Tag Command Queuing\n", SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
hostdata->tag_negotiated |= (1<<SCp->device->id);
scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
hostdata->tag_negotiated |= (1<<scmd_id(SCp));
NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
}
......@@ -1810,17 +1813,16 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
* FIXME: This will royally screw up on multiple LUN devices
* */
if(!blk_rq_tagged(SCp->request)
&& (hostdata->tag_negotiated &(1<<SCp->device->id))) {
printk(KERN_INFO "scsi%d: (%d:%d) Disabling Tag Command Queuing\n", SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
hostdata->tag_negotiated &= ~(1<<SCp->device->id);
&& (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
}
if((hostdata->tag_negotiated &(1<<SCp->device->id))
if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
&& scsi_get_tag_type(SCp->device)) {
slot->tag = SCp->request->tag;
DEBUG(("53c700 %d:%d:%d, sending out tag %d, slot %p\n",
SCp->device->host->host_no, SCp->device->id, SCp->device->lun, slot->tag,
slot));
CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
slot->tag, slot);
} else {
slot->tag = SCSI_NO_TAG;
/* must populate current_cmnd for scsi_find_tag to work */
......@@ -1920,8 +1922,8 @@ NCR_700_abort(struct scsi_cmnd * SCp)
{
struct NCR_700_command_slot *slot;
printk(KERN_INFO "scsi%d (%d:%d) New error handler wants to abort command\n\t",
SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
scmd_printk(KERN_INFO, SCp,
"New error handler wants to abort command\n\t");
scsi_print_command(SCp);
slot = (struct NCR_700_command_slot *)SCp->host_scribble;
......@@ -1954,8 +1956,8 @@ NCR_700_bus_reset(struct scsi_cmnd * SCp)
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
printk(KERN_INFO "scsi%d (%d:%d) New error handler wants BUS reset, cmd %p\n\t",
SCp->device->host->host_no, SCp->device->id, SCp->device->lun, SCp);
scmd_printk(KERN_INFO, SCp,
"New error handler wants BUS reset, cmd %p\n\t", SCp);
scsi_print_command(SCp);
/* In theory, eh_complete should always be null because the
......@@ -1987,8 +1989,7 @@ NCR_700_bus_reset(struct scsi_cmnd * SCp)
STATIC int
NCR_700_host_reset(struct scsi_cmnd * SCp)
{
printk(KERN_INFO "scsi%d (%d:%d) New error handler wants HOST reset\n\t",
SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
scsi_print_command(SCp);
spin_lock_irq(SCp->device->host->host_lock);
......@@ -2110,7 +2111,7 @@ static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
/* shift back to the default unqueued number of commands
* (the user can still raise this) */
scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
hostdata->tag_negotiated &= ~(1 << SDp->id);
hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
} else {
/* Here, we cleared the negotiation flag above, so this
* will force the driver to renegotiate */
......
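Throughout the 53c700, NCR5380 and NCR_ESP changes, open-coded SCp->device->id references become scmd_id()/sdev_id() accessors. These are thin wrappers over the same fields; from memory of the 2.6 headers they amount to roughly:

    #define sdev_id(sdev)           ((sdev)->id)
    #define sdev_channel(sdev)      ((sdev)->channel)
    #define scmd_id(scmd)           sdev_id((scmd)->device)
    #define scmd_channel(scmd)      sdev_channel((scmd)->device)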
......@@ -22,8 +22,14 @@
#ifdef NCR_700_DEBUG
#define DEBUG(x) printk x
#define DDEBUG(prefix, sdev, fmt, a...) \
sdev_printk(prefix, sdev, fmt, ##a)
#define CDEBUG(prefix, scmd, fmt, a...) \
scmd_printk(prefix, scmd, fmt, ##a)
#else
#define DEBUG(x)
#define DEBUG(x) do {} while (0)
#define DDEBUG(prefix, scmd, fmt, a...) do {} while (0)
#define CDEBUG(prefix, scmd, fmt, a...) do {} while (0)
#endif
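The empty-case debug macros now expand to do {} while (0) instead of nothing, so an invocation followed by a semicolon is always one complete statement and the compiler never sees an empty body. The idiom matters most for multi-statement macros, e.g.:

    /* why do { } while (0): a multi-statement macro stays a single statement */
    #define LOG_TWICE(m)    do { printk(m); printk(m); } while (0)

    if (verbose)
        LOG_TWICE("hello\n");   /* still exactly one statement ... */
    else
        return;                 /* ... so this else binds to the if as intended */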
/* The number of available command slots */
......
......@@ -229,7 +229,7 @@ config SCSI_FC_ATTRS
config SCSI_ISCSI_ATTRS
tristate "iSCSI Transport Attributes"
depends on SCSI
depends on SCSI && NET
help
If you wish to export transport-specific information about
each attached iSCSI device to sysfs, say Y.
......@@ -247,6 +247,30 @@ endmenu
menu "SCSI low-level drivers"
depends on SCSI!=n
config ISCSI_TCP
tristate "iSCSI Initiator over TCP/IP"
depends on SCSI && INET
select CRYPTO
select CRYPTO_MD5
select CRYPTO_CRC32C
select SCSI_ISCSI_ATTRS
help
The iSCSI Driver provides a host with the ability to access storage
through an IP network. The driver uses the iSCSI protocol to transport
SCSI requests and responses over a TCP/IP network between the host
(the "initiator") and "targets". Architecturally, the iSCSI driver
combines with the host's TCP/IP stack, network drivers, and Network
Interface Card (NIC) to provide the same functions as a SCSI or a
Fibre Channel (FC) adapter driver with a Host Bus Adapter (HBA).
To compile this driver as a module, choose M here: the
module will be called iscsi_tcp.
The userspace component needed to initialize the driver, documentation,
and sample configuration files can be found here:
http://linux-iscsi.sf.net
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
depends on SGI_IP22 && SCSI
......@@ -596,19 +620,6 @@ config SCSI_OMIT_FLASHPOINT
substantial, so users of MultiMaster Host Adapters may wish to omit
it.
#
# This is marked broken because it uses over 4kB of stack in
# just two routines:
# 2076 CpqTsProcessIMQEntry
# 2052 PeekIMQEntry
#
config SCSI_CPQFCTS
tristate "Compaq Fibre Channel 64-bit/66Mhz HBA support"
depends on PCI && SCSI && BROKEN
help
Say Y here to compile in support for the Compaq StorageWorks Fibre
Channel 64-bit/66Mhz Host Bus Adapter.
config SCSI_DMX3191D
tristate "DMX3191D SCSI support"
depends on PCI && SCSI
......
......@@ -33,6 +33,7 @@ obj-$(CONFIG_SCSI_FC_ATTRS) += scsi_transport_fc.o
obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o
obj-$(CONFIG_ISCSI_TCP) += iscsi_tcp.o
obj-$(CONFIG_SCSI_AMIGA7XX) += amiga7xx.o 53c7xx.o
obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o
obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o
......@@ -119,7 +120,6 @@ obj-$(CONFIG_JAZZ_ESP) += NCR53C9x.o jazz_esp.o
obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o
obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
obj-$(CONFIG_SCSI_FCAL) += fcal.o
obj-$(CONFIG_SCSI_CPQFCTS) += cpqfc.o
obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
obj-$(CONFIG_SCSI_NSP32) += nsp32.o
obj-$(CONFIG_SCSI_IPR) += ipr.o
......@@ -164,8 +164,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs := zalon.o ncr53c8xx.o
NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
cpqfc-objs := cpqfcTSinit.o cpqfcTScontrol.o cpqfcTSi2c.o \
cpqfcTSworker.o cpqfcTStrigger.o
libata-objs := libata-core.o libata-scsi.o
# Files generated that shall be removed upon make clean
......
......@@ -1247,13 +1247,13 @@ static void collect_stats(struct NCR5380_hostdata *hostdata, Scsi_Cmnd * cmd)
case WRITE:
case WRITE_6:
case WRITE_10:
hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase);
hostdata->time_write[scmd_id(cmd)] += (jiffies - hostdata->timebase);
hostdata->pendingw--;
break;
case READ:
case READ_6:
case READ_10:
hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase);
hostdata->time_read[scmd_id(cmd)] += (jiffies - hostdata->timebase);
hostdata->pendingr--;
break;
}
......@@ -1385,7 +1385,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
* the host and target ID's on the SCSI bus.
*/
NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id)));
NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << scmd_id(cmd))));
/*
* Raise ATN while SEL is true before BSY goes false from arbitration,
......@@ -1430,7 +1430,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
udelay(1);
dprintk(NDEBUG_SELECTION, ("scsi%d : selecting target %d\n", instance->host_no, cmd->device->id));
dprintk(NDEBUG_SELECTION, ("scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd)));
/*
* The SCSI specification calls for a 250 ms timeout for the actual
......@@ -1483,7 +1483,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
if (hostdata->targets_present & (1 << cmd->device->id)) {
if (hostdata->targets_present & (1 << scmd_id(cmd))) {
printk(KERN_DEBUG "scsi%d : weirdness\n", instance->host_no);
if (hostdata->restart_select)
printk(KERN_DEBUG "\trestart select\n");
......@@ -1499,7 +1499,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return 0;
}
hostdata->targets_present |= (1 << cmd->device->id);
hostdata->targets_present |= (1 << scmd_id(cmd));
/*
* Since we followed the SCSI spec, and raised ATN while SEL
......@@ -2190,7 +2190,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
* If the watchdog timer fires, all future accesses to this
* device will use the polled-IO.
*/
printk("scsi%d : switching target %d lun %d to slow handshake\n", instance->host_no, cmd->device->id, cmd->device->lun);
scmd_printk(KERN_INFO, cmd,
"switching to slow handshake\n");
cmd->device->borken = 1;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
sink = 1;
......@@ -2429,9 +2430,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
scsi_print_msg(extended_msg);
printk("\n");
} else if (tmp != EXTENDED_MESSAGE)
printk("scsi%d: rejecting unknown message %02x from target %d, lun %d\n", instance->host_no, tmp, cmd->device->id, cmd->device->lun);
scmd_printk(KERN_INFO, cmd,
"rejecting unknown message %02x\n",tmp);
else
printk("scsi%d: rejecting unknown extended message code %02x, length %d from target %d, lun %d\n", instance->host_no, extended_msg[1], extended_msg[0], cmd->device->id, cmd->device->lun);
scmd_printk(KERN_INFO, cmd,
"rejecting unknown extended message code %02x, length %d\n", extended_msg[1], extended_msg[0]);
msgout = MESSAGE_REJECT;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
......
......@@ -936,7 +936,7 @@ static void esp_release_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
static void esp_restore_pointers(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
struct esp_pointers *ep = &esp->data_pointers[sp->device->id];
struct esp_pointers *ep = &esp->data_pointers[scmd_id(sp)];
sp->SCp.ptr = ep->saved_ptr;
sp->SCp.buffer = ep->saved_buffer;
......@@ -946,7 +946,7 @@ static void esp_restore_pointers(struct NCR_ESP *esp, Scsi_Cmnd *sp)
static void esp_save_pointers(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
struct esp_pointers *ep = &esp->data_pointers[sp->device->id];
struct esp_pointers *ep = &esp->data_pointers[scmd_id(sp)];
ep->saved_ptr = sp->SCp.ptr;
ep->saved_buffer = sp->SCp.buffer;
......@@ -1693,13 +1693,13 @@ static inline void esp_connect(struct NCR_ESP *esp, struct ESP_regs *eregs,
if(esp->prev_soff != esp_dev->sync_max_offset ||
esp->prev_stp != esp_dev->sync_min_period ||
(esp->erev > esp100a &&
esp->prev_cfg3 != esp->config3[sp->device->id])) {
esp->prev_cfg3 != esp->config3[scmd_id(sp)])) {
esp->prev_soff = esp_dev->sync_max_offset;
esp_write(eregs->esp_soff, esp->prev_soff);
esp->prev_stp = esp_dev->sync_min_period;
esp_write(eregs->esp_stp, esp->prev_stp);
if(esp->erev > esp100a) {
esp->prev_cfg3 = esp->config3[sp->device->id];
esp->prev_cfg3 = esp->config3[scmd_id(sp)];
esp_write(eregs->esp_cfg3, esp->prev_cfg3);
}
}
......@@ -2205,7 +2205,7 @@ static int esp_do_freebus(struct NCR_ESP *esp, struct ESP_regs *eregs)
if(SCptr->SCp.Status != GOOD &&
SCptr->SCp.Status != CONDITION_GOOD &&
((1<<SCptr->device->id) & esp->targets_present) &&
((1<<scmd_id(SCptr)) & esp->targets_present) &&
esp_dev->sync && esp_dev->sync_max_offset) {
/* SCSI standard says that the synchronous capabilities
* should be renegotiated at this point. Most likely
......@@ -2597,7 +2597,7 @@ static int esp_select_complete(struct NCR_ESP *esp, struct ESP_regs *eregs)
*/
if(esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
/* target speaks... */
esp->targets_present |= (1<<SCptr->device->id);
esp->targets_present |= (1<<scmd_id(SCptr));
/* What if the target ignores the sdtr? */
if(esp->snip)
......@@ -3064,7 +3064,7 @@ static int check_multibyte_msg(struct NCR_ESP *esp,
ESPSDTR(("soff=%2x stp=%2x cfg3=%2x\n",
esp_dev->sync_max_offset,
esp_dev->sync_min_period,
esp->config3[SCptr->device->id]));
esp->config3[scmd_id(SCptr)]));
esp->snip = 0;
} else if(esp_dev->sync_max_offset) {
......@@ -3621,7 +3621,7 @@ void esp_slave_destroy(Scsi_Device *SDptr)
{
struct NCR_ESP *esp = (struct NCR_ESP *) SDptr->host->hostdata;
esp->targets_present &= ~(1 << SDptr->id);
esp->targets_present &= ~(1 << sdev_id(SDptr));
kfree(SDptr->hostdata);
SDptr->hostdata = NULL;
}
......
......@@ -710,7 +710,7 @@ static int NCR53c406a_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
/* We are locked here already by the mid layer */
REG0;
outb(SCpnt->device->id, DEST_ID); /* set destination */
outb(scmd_id(SCpnt), DEST_ID); /* set destination */
outb(FLUSH_FIFO, CMD_REG); /* reset the fifos */
for (i = 0; i < SCpnt->cmd_len; i++) {
......
......@@ -923,7 +923,7 @@ static int inia100_device_reset(struct scsi_cmnd * SCpnt)
{ /* I need Host Control Block Information */
ORC_HCS *pHCB;
pHCB = (ORC_HCS *) SCpnt->device->host->hostdata;
return orc_device_reset(pHCB, SCpnt, SCpnt->device->id);
return orc_device_reset(pHCB, SCpnt, scmd_id(SCpnt));
}
......
......@@ -57,7 +57,7 @@ Deanna Bonds (non-DASD support, PAE fibs and 64 bit,
(fixed 64bit and 64G memory model, changed confusing naming convention
where fibs that go to the hardware are consistently called hw_fibs and
not just fibs like the name of the driver tracking structure)
Mark Salyzyn <Mark_Salyzyn@adaptec.com> Fixed panic issues and added some new product ids for upcoming hbas.
Mark Salyzyn <Mark_Salyzyn@adaptec.com> Fixed panic issues and added some new product ids for upcoming hbas. Performance tuning, card failover and bug mitigations.
Original Driver
-------------------------
......
o Testing
o More testing
o Drop irq_mask, basically unused
o I/O size increase
......@@ -359,15 +359,6 @@ int aac_get_containers(struct aac_dev *dev)
return status;
}
static void aac_io_done(struct scsi_cmnd * scsicmd)
{
unsigned long cpu_flags;
struct Scsi_Host *host = scsicmd->device->host;
spin_lock_irqsave(host->host_lock, cpu_flags);
scsicmd->scsi_done(scsicmd);
spin_unlock_irqrestore(host->host_lock, cpu_flags);
}
static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigned int offset, unsigned int len)
{
void *buf;
......@@ -424,7 +415,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
fib_complete(fibptr);
fib_free(fibptr);
aac_io_done(scsicmd);
scsicmd->scsi_done(scsicmd);
}
/**
......@@ -608,17 +599,43 @@ static char *container_types[] = {
* files instead of in OS dependant driver source.
*/
static void setinqstr(int devtype, void *data, int tindex)
static void setinqstr(struct aac_dev *dev, void *data, int tindex)
{
struct scsi_inq *str;
struct aac_driver_ident *mp;
mp = aac_get_driver_ident(devtype);
str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
inqstrcpy (mp->vname, str->vid);
inqstrcpy (mp->model, str->pid); /* last six chars reserved for vol type */
memset(str, ' ', sizeof(*str));
if (dev->supplement_adapter_info.AdapterTypeText[0]) {
char * cp = dev->supplement_adapter_info.AdapterTypeText;
int c = sizeof(str->vid);
while (*cp && *cp != ' ' && --c)
++cp;
c = *cp;
*cp = '\0';
inqstrcpy (dev->supplement_adapter_info.AdapterTypeText,
str->vid);
*cp = c;
while (*cp && *cp != ' ')
++cp;
while (*cp == ' ')
++cp;
/* last six chars reserved for vol type */
c = 0;
if (strlen(cp) > sizeof(str->pid)) {
c = cp[sizeof(str->pid)];
cp[sizeof(str->pid)] = '\0';
}
inqstrcpy (cp, str->pid);
if (c)
cp[sizeof(str->pid)] = c;
} else {
struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
inqstrcpy (mp->vname, str->vid);
/* last six chars reserved for vol type */
inqstrcpy (mp->model, str->pid);
}
if (tindex < (sizeof(container_types)/sizeof(char *))){
char *findit = str->pid;
......@@ -627,7 +644,9 @@ static void setinqstr(int devtype, void *data, int tindex)
/* RAID is superfluous in the context of a RAID device */
if (memcmp(findit-4, "RAID", 4) == 0)
*(findit -= 4) = ' ';
inqstrcpy (container_types[tindex], findit + 1);
if (((findit - str->pid) + strlen(container_types[tindex]))
< (sizeof(str->pid) + sizeof(str->prl)))
inqstrcpy (container_types[tindex], findit + 1);
}
inqstrcpy ("V1.0", str->prl);
}
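The new setinqstr() prefers the firmware-supplied AdapterTypeText: the word before the first blank becomes the vendor id and the remainder becomes the product id, with the string temporarily NUL-terminated in place so inqstrcpy() cannot overrun the 8-byte vid / 16-byte pid fields (those sizes are the standard INQUIRY ones and assumed here, since struct scsi_inq is not part of the quoted hunks). The same splitting logic, re-expressed as a small standalone sketch:

    #include <stdio.h>
    #include <string.h>

    /* illustrative only: split "VENDOR Product name" into vid/pid strings */
    static void split_adapter_type_text(const char *text, char vid[9], char pid[17])
    {
        const char *sp = strchr(text, ' ');
        size_t vlen = sp ? (size_t)(sp - text) : strlen(text);

        if (vlen > 8)
            vlen = 8;                               /* vendor field is 8 bytes */
        memcpy(vid, text, vlen);
        vid[vlen] = '\0';

        while (sp && *sp == ' ')                    /* skip the separating blanks */
            sp++;
        snprintf(pid, 17, "%s", sp ? sp : "");      /* product field is 16 bytes */
    }

    int main(void)
    {
        char vid[9], pid[17];
        split_adapter_type_text("ADAPTEC Some Adapter", vid, pid);
        printf("vid='%s' pid='%s'\n", vid, pid);    /* vid='ADAPTEC' pid='Some Adapter' */
        return 0;
    }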
......@@ -822,12 +841,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->dac_support = (dacmode!=0);
}
if(dev->dac_support != 0) {
if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL) &&
!pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL)) {
if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) &&
!pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) {
printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
dev->name, dev->id);
} else if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFULL) &&
!pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFULL)) {
} else if (!pci_set_dma_mask(dev->pdev, DMA_32BIT_MASK) &&
!pci_set_consistent_dma_mask(dev->pdev, DMA_32BIT_MASK)) {
printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
dev->name, dev->id);
dev->dac_support = 0;
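DMA_64BIT_MASK and DMA_32BIT_MASK come from <linux/dma-mapping.h> and simply give names to the literals that were open-coded before, so the behaviour of the 64-bit DAC probe is unchanged:

    #define DMA_64BIT_MASK  0xffffffffffffffffULL
    #define DMA_32BIT_MASK  0x00000000ffffffffULL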
......@@ -960,7 +979,7 @@ static void io_callback(void *context, struct fib * fibptr)
fib_complete(fibptr);
fib_free(fibptr);
aac_io_done(scsicmd);
scsicmd->scsi_done(scsicmd);
}
static int aac_read(struct scsi_cmnd * scsicmd, int cid)
......@@ -1139,7 +1158,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
aac_io_done(scsicmd);
scsicmd->scsi_done(scsicmd);
fib_complete(cmd_fibcontext);
fib_free(cmd_fibcontext);
return 0;
......@@ -1211,7 +1230,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
*/
if (!(cmd_fibcontext = fib_alloc(dev))) {
scsicmd->result = DID_ERROR << 16;
aac_io_done(scsicmd);
scsicmd->scsi_done(scsicmd);
return 0;
}
fib_init(cmd_fibcontext);
......@@ -1308,7 +1327,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
aac_io_done(scsicmd);
scsicmd->scsi_done(scsicmd);
fib_complete(cmd_fibcontext);
fib_free(cmd_fibcontext);
......@@ -1352,7 +1371,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
fib_complete(fibptr);
fib_free(fibptr);
aac_io_done(cmd);
cmd->scsi_done(cmd);
}
static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
......@@ -1438,7 +1457,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
struct Scsi_Host *host = scsicmd->device->host;
struct aac_dev *dev = (struct aac_dev *)host->hostdata;
struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
int cardtype = dev->cardtype;
int ret;
/*
......@@ -1446,7 +1464,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
* Test does not apply to ID 16, the pseudo id for the controller
* itself.
*/
if (scsicmd->device->id != host->this_id) {
if (scmd_id(scsicmd) != host->this_id) {
if ((scsicmd->device->channel == 0) ){
if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){
scsicmd->result = DID_NO_CONNECT << 16;
......@@ -1541,15 +1559,15 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
* Set the Vendor, Product, and Revision Level
* see: <vendor>.c i.e. aac.c
*/
if (scsicmd->device->id == host->this_id) {
setinqstr(cardtype, (void *) (inq_data.inqd_vid), (sizeof(container_types)/sizeof(char *)));
if (scmd_id(scsicmd) == host->this_id) {
setinqstr(dev, (void *) (inq_data.inqd_vid), (sizeof(container_types)/sizeof(char *)));
inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
return 0;
}
setinqstr(cardtype, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
return aac_get_container_name(scsicmd, cid);
......@@ -1931,7 +1949,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
* the channel is 2
*/
} else if ((dev->raid_scsi_mode) &&
(scsicmd->device->channel == 2)) {
(scmd_channel(scsicmd) == 2)) {
scsicmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8;
} else {
......@@ -1975,7 +1993,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
* the channel is 2
*/
} else if ((dev->raid_scsi_mode) &&
(scsicmd->device->channel == 2)) {
(scmd_channel(scsicmd) == 2)) {
scsicmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8;
} else {
......@@ -2070,7 +2088,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
fib_complete(fibptr);
fib_free(fibptr);
aac_io_done(scsicmd);
scsicmd->scsi_done(scsicmd);
}
/**
......
......@@ -481,6 +481,7 @@ enum aac_log_level {
#define FSAFS_NTC_FIB_CONTEXT 0x030c
struct aac_dev;
struct fib;
struct adapter_ops
{
......@@ -489,6 +490,7 @@ struct adapter_ops
void (*adapter_disable_int)(struct aac_dev *dev);
int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
int (*adapter_check_health)(struct aac_dev *dev);
int (*adapter_send)(struct fib * fib);
};
/*
......@@ -659,6 +661,10 @@ struct rx_mu_registers {
Status Register */
__le32 OIMR; /* 1334h | 34h | Outbound Interrupt
Mask Register */
__le32 reserved2; /* 1338h | 38h | Reserved */
__le32 reserved3; /* 133Ch | 3Ch | Reserved */
__le32 InboundQueue;/* 1340h | 40h | Inbound Queue Port relative to firmware */
__le32 OutboundQueue;/*1344h | 44h | Outbound Queue Port relative to firmware */
/* * Must access through ATU Inbound
Translation Window */
};
......@@ -693,8 +699,8 @@ struct rx_inbound {
#define OutboundDoorbellReg MUnit.ODR
struct rx_registers {
struct rx_mu_registers MUnit; /* 1300h - 1334h */
__le32 reserved1[6]; /* 1338h - 134ch */
struct rx_mu_registers MUnit; /* 1300h - 1344h */
__le32 reserved1[2]; /* 1348h - 134ch */
struct rx_inbound IndexRegs;
};
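The reserved padding shrinks because rx_mu_registers grew: reserved2, reserved3, InboundQueue and OutboundQueue add four 32-bit words (16 bytes, 1338h-1344h), and reserved1 drops from six words to two (also 16 bytes), so IndexRegs stays at the same offset. The same bookkeeping applies to rkt_registers below, where reserved1 goes from 1010 to 1006 words. A compile-time guard one could add for this kind of layout change (hypothetical, not part of the patch):

    /* IndexRegs must stay at 1350h - 1300h = 0x50 into the register block */
    BUILD_BUG_ON(offsetof(struct rx_registers, IndexRegs) != 0x50);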
......@@ -711,8 +717,8 @@ struct rx_registers {
#define rkt_inbound rx_inbound
struct rkt_registers {
struct rkt_mu_registers MUnit; /* 1300h - 1334h */
__le32 reserved1[1010]; /* 1338h - 22fch */
struct rkt_mu_registers MUnit; /* 1300h - 1344h */
__le32 reserved1[1006]; /* 1348h - 22fch */
struct rkt_inbound IndexRegs; /* 2300h - */
};
......@@ -721,8 +727,6 @@ struct rkt_registers {
#define rkt_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rkt->CSR))
#define rkt_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rkt->CSR))
struct fib;
typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
struct aac_fib_context {
......@@ -937,7 +941,6 @@ struct aac_dev
const char *name;
int id;
u16 irq_mask;
/*
* negotiated FIB settings
*/
......@@ -972,6 +975,7 @@ struct aac_dev
struct adapter_ops a_ops;
unsigned long fsrev; /* Main driver's revision number */
unsigned base_size; /* Size of mapped in region */
struct aac_init *init; /* Holds initialization info to communicate with adapter */
dma_addr_t init_pa; /* Holds physical address of the init struct */
......@@ -992,6 +996,9 @@ struct aac_dev
/*
* The following is the device specific extension.
*/
#if (!defined(AAC_MIN_FOOTPRINT_SIZE))
# define AAC_MIN_FOOTPRINT_SIZE 8192
#endif
union
{
struct sa_registers __iomem *sa;
......@@ -1012,6 +1019,7 @@ struct aac_dev
u8 nondasd_support;
u8 dac_support;
u8 raid_scsi_mode;
u8 new_comm_interface;
/* macro side-effects BEWARE */
# define raw_io_interface \
init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
......@@ -1034,6 +1042,8 @@ struct aac_dev
#define aac_adapter_check_health(dev) \
(dev)->a_ops.adapter_check_health(dev)
#define aac_adapter_send(fib) \
((fib)->dev)->a_ops.adapter_send(fib)
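adapter_send is the new per-adapter hook for the "new comm" interface: instead of building a host queue entry, the driver hands the firmware a FIB directly (on Rx/Rkt hardware via the InboundQueue port added above). The real implementations live in the rx/rkt files, which are not quoted here; as a rough, hypothetical sketch of the shape:

    /* hypothetical sketch -- not the literal aacraid implementation */
    static int aac_rx_send_sketch(struct fib *fib)
    {
        struct aac_dev *dev = fib->dev;

        /* post the FIB's bus address through the new queue port;
         * a non-zero return tells fib_send() to retry (queue full) */
        rx_writel(dev, MUnit.InboundQueue, (u32)fib->hw_fib_pa);
        return 0;
    }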
#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
......@@ -1560,7 +1570,7 @@ struct fib_ioctl
struct revision
{
__le32 compat;
u32 compat;
__le32 version;
__le32 build;
};
......@@ -1779,6 +1789,7 @@ int aac_rkt_init(struct aac_dev *dev);
int aac_sa_init(struct aac_dev *dev);
unsigned int aac_response_normal(struct aac_queue * q);
unsigned int aac_command_normal(struct aac_queue * q);
unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
int aac_command_thread(struct aac_dev * dev);
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
int fib_adapter_complete(struct fib * fibptr, unsigned short size);
......
......@@ -408,7 +408,7 @@ static int check_revision(struct aac_dev *dev, void __user *arg)
char *driver_version = aac_driver_version;
u32 version;
response.compat = cpu_to_le32(1);
response.compat = 1;
version = (simple_strtol(driver_version,
&driver_version, 10) << 24) | 0x00000400;
version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
......@@ -574,7 +574,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -ENOMEM;
goto cleanup;
}
sg_user[i] = (void __user *)usg->sg[i].addr;
sg_user[i] = (void __user *)(long)usg->sg[i].addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
......@@ -624,7 +624,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -ENOMEM;
goto cleanup;
}
sg_user[i] = (void __user *)upsg->sg[i].addr;
sg_user[i] = (void __user *)(long)upsg->sg[i].addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
......
......@@ -116,6 +116,10 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
}
init->InitFlags = 0;
if (dev->new_comm_interface) {
init->InitFlags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
}
init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
......@@ -315,12 +319,33 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
- sizeof(struct aac_fibhdr)
- sizeof(struct aac_write) + sizeof(struct sgentry))
/ sizeof(struct sgentry);
dev->new_comm_interface = 0;
dev->raw_io_64 = 0;
if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
(status[0] == 0x00000001)) {
if (status[1] & AAC_OPT_NEW_COMM_64)
dev->raw_io_64 = 1;
if (status[1] & AAC_OPT_NEW_COMM)
dev->new_comm_interface = dev->a_ops.adapter_send != 0;
if (dev->new_comm_interface && (status[2] > dev->base_size)) {
iounmap(dev->regs.sa);
dev->base_size = status[2];
dprintk((KERN_DEBUG "ioremap(%lx,%d)\n",
host->base, status[2]));
dev->regs.sa = ioremap(host->base, status[2]);
if (dev->regs.sa == NULL) {
/* remap failed, go back ... */
dev->new_comm_interface = 0;
dev->regs.sa = ioremap(host->base,
AAC_MIN_FOOTPRINT_SIZE);
if (dev->regs.sa == NULL) {
printk(KERN_WARNING
"aacraid: unable to map adapter.\n");
return NULL;
}
}
}
}
if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
0, 0, 0, 0, 0, 0,
......
......@@ -212,7 +212,7 @@ void fib_init(struct fib *fibptr)
hw_fib->header.StructType = FIB_MAGIC;
hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
......@@ -380,9 +380,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
{
u32 index;
struct aac_dev * dev = fibptr->dev;
unsigned long nointr = 0;
struct hw_fib * hw_fib = fibptr->hw_fib;
struct aac_queue * q;
unsigned long flags = 0;
......@@ -417,7 +415,7 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
* Map the fib into 32bits by using the fib number
*/
hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr-dev->fibs)) << 1);
hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
/*
* Set FIB state to indicate where it came from and if we want a
......@@ -456,10 +454,10 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
FIB_COUNTER_INCREMENT(aac_config.FibsSent);
dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
dprintk((KERN_DEBUG "Fib contents:.\n"));
dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
......@@ -469,14 +467,37 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
if(wait)
spin_lock_irqsave(&fibptr->event_lock, flags);
spin_lock_irqsave(q->lock, qflags);
aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
list_add_tail(&fibptr->queue, &q->pendingq);
q->numpending++;
*(q->headers.producer) = cpu_to_le32(index + 1);
spin_unlock_irqrestore(q->lock, qflags);
if (!(nointr & aac_config.irq_mod))
aac_adapter_notify(dev, AdapNormCmdQueue);
if (dev->new_comm_interface) {
unsigned long count = 10000000L; /* 50 seconds */
list_add_tail(&fibptr->queue, &q->pendingq);
q->numpending++;
spin_unlock_irqrestore(q->lock, qflags);
while (aac_adapter_send(fibptr) != 0) {
if (--count == 0) {
if (wait)
spin_unlock_irqrestore(&fibptr->event_lock, flags);
spin_lock_irqsave(q->lock, qflags);
q->numpending--;
list_del(&fibptr->queue);
spin_unlock_irqrestore(q->lock, qflags);
return -ETIMEDOUT;
}
udelay(5);
}
} else {
u32 index;
unsigned long nointr = 0;
aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
list_add_tail(&fibptr->queue, &q->pendingq);
q->numpending++;
*(q->headers.producer) = cpu_to_le32(index + 1);
spin_unlock_irqrestore(q->lock, qflags);
dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
if (!(nointr & aac_config.irq_mod))
aac_adapter_notify(dev, AdapNormCmdQueue);
}
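The new path busy-waits for the firmware to accept the FIB: up to 10,000,000 attempts with udelay(5) between them, i.e. 50 seconds, matching the comment; on timeout the FIB is unlinked again under the queue lock and -ETIMEDOUT returned, while the legacy queue-entry path is kept verbatim in the else branch. Written with named constants the bound would read (equivalent, hypothetical form):

    #define AAC_SEND_POLL_USECS     5
    #define AAC_SEND_TIMEOUT_SECS   50
    unsigned long count = (AAC_SEND_TIMEOUT_SECS * 1000000UL) / AAC_SEND_POLL_USECS;  /* 10,000,000 */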
/*
* If the caller wanted us to wait for response wait now.
*/
......@@ -492,7 +513,6 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
* hardware failure has occurred.
*/
unsigned long count = 36000000L; /* 3 minutes */
unsigned long qflags;
while (down_trylock(&fibptr->event_wait)) {
if (--count == 0) {
spin_lock_irqsave(q->lock, qflags);
......@@ -621,12 +641,16 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
unsigned long qflags;
if (hw_fib->header.XferState == 0) {
if (dev->new_comm_interface)
kfree (hw_fib);
return 0;
}
/*
* If we plan to do anything check the structure type first.
*/
if ( hw_fib->header.StructType != FIB_MAGIC ) {
if (dev->new_comm_interface)
kfree (hw_fib);
return -EINVAL;
}
/*
......@@ -637,21 +661,25 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
* send the completed cdb to the adapter.
*/
if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
u32 index;
hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
if (size) {
size += sizeof(struct aac_fibhdr);
if (size > le16_to_cpu(hw_fib->header.SenderSize))
return -EMSGSIZE;
hw_fib->header.Size = cpu_to_le16(size);
if (dev->new_comm_interface) {
kfree (hw_fib);
} else {
u32 index;
hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
if (size) {
size += sizeof(struct aac_fibhdr);
if (size > le16_to_cpu(hw_fib->header.SenderSize))
return -EMSGSIZE;
hw_fib->header.Size = cpu_to_le16(size);
}
q = &dev->queues->queue[AdapNormRespQueue];
spin_lock_irqsave(q->lock, qflags);
aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
*(q->headers.producer) = cpu_to_le32(index + 1);
spin_unlock_irqrestore(q->lock, qflags);
if (!(nointr & (int)aac_config.irq_mod))
aac_adapter_notify(dev, AdapNormRespQueue);
}
q = &dev->queues->queue[AdapNormRespQueue];
spin_lock_irqsave(q->lock, qflags);
aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
*(q->headers.producer) = cpu_to_le32(index + 1);
spin_unlock_irqrestore(q->lock, qflags);
if (!(nointr & (int)aac_config.irq_mod))
aac_adapter_notify(dev, AdapNormRespQueue);
}
else
{
......
......@@ -73,7 +73,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
int fast;
u32 index = le32_to_cpu(entry->addr);
fast = index & 0x01;
fib = &dev->fibs[index >> 1];
fib = &dev->fibs[index >> 2];
hwfib = fib->hw_fib;
aac_consumer_free(dev, q, HostNormRespQueue);
......@@ -213,3 +213,116 @@ unsigned int aac_command_normal(struct aac_queue *q)
spin_unlock_irqrestore(q->lock, flags);
return 0;
}
/**
* aac_intr_normal - Handle command replies
* @dev: Device
* @index: completion reference
*
* This DPC routine will be run when the adapter interrupts us to let us
* know there is a response on our normal priority queue. We will pull off
 * all the QEs there are and wake up all the waiters before exiting.
*/
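/*
 * Note on the encoding of @Index (inferred from the code below, not from
 * adapter documentation): if bit 1 is set, the entry is an adapter-initiated
 * command/AIF that is copied and queued for the aif_thread; otherwise bit 0
 * marks a "fast" (implicitly successful) response and the remaining bits,
 * shifted right by 2, index the corresponding entry in dev->fibs[].
 */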
unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
{
u32 index = le32_to_cpu(Index);
dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, Index));
if ((index & 0x00000002L)) {
struct hw_fib * hw_fib;
struct fib * fib;
struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
unsigned long flags;
if (index == 0xFFFFFFFEL) /* Special Case */
return 0; /* Do nothing */
/*
* Allocate a FIB. For non queued stuff we can just use
* the stack so we are happy. We need a fib object in order to
* manage the linked lists.
*/
if ((!dev->aif_thread)
|| (!(fib = kmalloc(sizeof(struct fib),GFP_ATOMIC))))
return 1;
if (!(hw_fib = kmalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
kfree (fib);
return 1;
}
memset(hw_fib, 0, sizeof(struct hw_fib));
memcpy(hw_fib, (struct hw_fib *)(((unsigned long)(dev->regs.sa)) + (index & ~0x00000002L)), sizeof(struct hw_fib));
memset(fib, 0, sizeof(struct fib));
INIT_LIST_HEAD(&fib->fiblink);
fib->type = FSAFS_NTC_FIB_CONTEXT;
fib->size = sizeof(struct fib);
fib->hw_fib = hw_fib;
fib->data = hw_fib->data;
fib->dev = dev;
spin_lock_irqsave(q->lock, flags);
list_add_tail(&fib->fiblink, &q->cmdq);
wake_up_interruptible(&q->cmdready);
spin_unlock_irqrestore(q->lock, flags);
return 1;
} else {
int fast = index & 0x01;
struct fib * fib = &dev->fibs[index >> 2];
struct hw_fib * hwfib = fib->hw_fib;
/*
* Remove this fib from the Outstanding I/O queue.
* But only if it has not already been timed out.
*
* If the fib has been timed out already, then just
* continue. The caller has already been notified that
* the fib timed out.
*/
if ((fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags);
printk(KERN_DEBUG"aacraid: hwfib=%p index=%i fib=%p\n",hwfib, hwfib->header.SenderData,fib);
return 0;
}
list_del(&fib->queue);
dev->queues->queue[AdapNormCmdQueue].numpending--;
if (fast) {
/*
* Doctor the fib
*/
*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
}
FIB_COUNTER_INCREMENT(aac_config.FibRecved);
if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
{
u32 *pstatus = (u32 *)hwfib->data;
if (*pstatus & cpu_to_le32(0xffff0000))
*pstatus = cpu_to_le32(ST_OK);
}
if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
{
if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
else
FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
/*
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
fib->callback(fib->callback_data, fib);
} else {
unsigned long flagv;
dprintk((KERN_INFO "event_wait up\n"));
spin_lock_irqsave(&fib->event_lock, flagv);
fib->done = 1;
up(&fib->event_wait);
spin_unlock_irqrestore(&fib->event_lock, flagv);
FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
}
return 0;
}
}
......@@ -752,8 +752,8 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
if (error)
goto out;
if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) ||
pci_set_consistent_dma_mask(pdev, 0xFFFFFFFFULL))
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
goto out;
/*
* If the quirk31 bit is set, the adapter needs adapter
......@@ -788,8 +788,29 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
goto out_free_host;
spin_lock_init(&aac->fib_lock);
if ((*aac_drivers[index].init)(aac))
/*
* Map in the registers from the adapter.
*/
aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
if ((aac->regs.sa = ioremap(
(unsigned long)aac->scsi_host_ptr->base, AAC_MIN_FOOTPRINT_SIZE))
== NULL) {
printk(KERN_WARNING "%s: unable to map adapter.\n",
AAC_DRIVERNAME);
goto out_free_fibs;
}
if ((*aac_drivers[index].init)(aac))
goto out_unmap;
/*
* Start any kernel threads needed
*/
aac->thread_pid = kernel_thread((int (*)(void *))aac_command_thread,
aac, 0);
if (aac->thread_pid < 0) {
printk(KERN_ERR "aacraid: Unable to create command thread.\n");
goto out_deinit;
}
/*
* If we had set a smaller DMA mask earlier, set it to 4gig
......@@ -797,9 +818,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
* address space.
*/
if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL))
goto out_free_fibs;
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
goto out_deinit;
aac->maximum_num_channels = aac_drivers[index].channels;
error = aac_get_adapter_info(aac);
if (error < 0)
......@@ -866,10 +887,11 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
aac_send_shutdown(aac);
aac_adapter_disable_int(aac);
free_irq(pdev->irq, aac);
out_unmap:
fib_map_free(aac);
pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
kfree(aac->queues);
free_irq(pdev->irq, aac);
iounmap(aac->regs.sa);
out_free_fibs:
kfree(aac->fibs);
......@@ -910,6 +932,7 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
iounmap(aac->regs.sa);
kfree(aac->fibs);
kfree(aac->fsa_dev);
list_del(&aac->entry);
scsi_host_put(shost);
......
......@@ -49,40 +49,57 @@
static irqreturn_t aac_rkt_intr(int irq, void *dev_id, struct pt_regs *regs)
{
struct aac_dev *dev = dev_id;
unsigned long bellbits;
u8 intstat, mask;
intstat = rkt_readb(dev, MUnit.OISR);
/*
* Read mask and invert because drawbridge is reversed.
* This allows us to only service interrupts that have
* been enabled.
*/
mask = ~(dev->OIMR);
/* Check to see if this is our interrupt. If it isn't just return */
if (intstat & mask)
{
bellbits = rkt_readl(dev, OutboundDoorbellReg);
if (bellbits & DoorBellPrintfReady) {
aac_printf(dev, rkt_readl(dev, IndexRegs.Mailbox[5]));
rkt_writel(dev, MUnit.ODR,DoorBellPrintfReady);
rkt_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
}
else if (bellbits & DoorBellAdapterNormCmdReady) {
rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
}
else if (bellbits & DoorBellAdapterNormRespReady) {
aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
rkt_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
}
else if (bellbits & DoorBellAdapterNormCmdNotFull) {
rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
if (dev->new_comm_interface) {
u32 Index = rkt_readl(dev, MUnit.OutboundQueue);
if (Index == 0xFFFFFFFFL)
Index = rkt_readl(dev, MUnit.OutboundQueue);
if (Index != 0xFFFFFFFFL) {
do {
if (aac_intr_normal(dev, Index)) {
rkt_writel(dev, MUnit.OutboundQueue, Index);
rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
}
Index = rkt_readl(dev, MUnit.OutboundQueue);
} while (Index != 0xFFFFFFFFL);
return IRQ_HANDLED;
}
else if (bellbits & DoorBellAdapterNormRespNotFull) {
rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
} else {
unsigned long bellbits;
u8 intstat;
intstat = rkt_readb(dev, MUnit.OISR);
/*
* Read mask and invert because drawbridge is reversed.
* This allows us to only service interrupts that have
* been enabled.
* Check to see if this is our interrupt. If it isn't just return
*/
if (intstat & ~(dev->OIMR))
{
bellbits = rkt_readl(dev, OutboundDoorbellReg);
if (bellbits & DoorBellPrintfReady) {
aac_printf(dev, rkt_readl (dev, IndexRegs.Mailbox[5]));
rkt_writel(dev, MUnit.ODR,DoorBellPrintfReady);
rkt_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
}
else if (bellbits & DoorBellAdapterNormCmdReady) {
rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
// rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
}
else if (bellbits & DoorBellAdapterNormRespReady) {
rkt_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
}
else if (bellbits & DoorBellAdapterNormCmdNotFull) {
rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
}
else if (bellbits & DoorBellAdapterNormRespNotFull) {
rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
}
return IRQ_HANDLED;
}
return IRQ_HANDLED;
}
return IRQ_NONE;
}
......@@ -173,7 +190,10 @@ static int rkt_sync_cmd(struct aac_dev *dev, u32 command,
/*
* Restore interrupt mask even though we timed out
*/
rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
if (dev->new_comm_interface)
rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
else
rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
return -ETIMEDOUT;
}
/*
......@@ -196,7 +216,10 @@ static int rkt_sync_cmd(struct aac_dev *dev, u32 command,
/*
* Restore interrupt mask
*/
rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
if (dev->new_comm_interface)
rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
else
rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
return 0;
}
......@@ -268,15 +291,6 @@ static void aac_rkt_start_adapter(struct aac_dev *dev)
init = dev->init;
init->HostElapsedSeconds = cpu_to_le32(get_seconds());
/*
 * First clear out all interrupts. Then enable the ones that we
* can handle.
*/
rkt_writeb(dev, MUnit.OIMR, 0xff);
rkt_writel(dev, MUnit.ODR, 0xffffffff);
// rkt_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
// We can only use a 32 bit address here
rkt_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
......@@ -349,6 +363,39 @@ static int aac_rkt_check_health(struct aac_dev *dev)
return 0;
}
/**
* aac_rkt_send
* @fib: fib to issue
*
 * Will send a fib, returning 0 if successful, or non-zero if no inbound
 * queue entry is available.
*/
static int aac_rkt_send(struct fib * fib)
{
u64 addr = fib->hw_fib_pa;
struct aac_dev *dev = fib->dev;
volatile void __iomem *device = dev->regs.rkt;
u32 Index;
dprintk((KERN_DEBUG "%p->aac_rkt_send(%p->%llx)\n", dev, fib, addr));
Index = rkt_readl(dev, MUnit.InboundQueue);
if (Index == 0xFFFFFFFFL)
Index = rkt_readl(dev, MUnit.InboundQueue);
dprintk((KERN_DEBUG "Index = 0x%x\n", Index));
if (Index == 0xFFFFFFFFL)
return Index;
device += Index;
dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff),
(u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size)));
writel((u32)(addr & 0xffffffff), device);
device += sizeof(u32);
writel((u32)(addr >> 32), device);
device += sizeof(u32);
writel(le16_to_cpu(fib->hw_fib->header.Size), device);
rkt_writel(dev, MUnit.InboundQueue, Index);
dprintk((KERN_DEBUG "aac_rkt_send - return 0\n"));
return 0;
}
/**
* aac_rkt_init - initialize an i960 based AAC card
* @dev: device to configure
......@@ -369,13 +416,8 @@ int aac_rkt_init(struct aac_dev *dev)
name = dev->name;
/*
* Map in the registers from the adapter.
* Check to see if the board panic'd while booting.
*/
if((dev->regs.rkt = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
{
printk(KERN_WARNING "aacraid: unable to map i960.\n" );
goto error_iounmap;
}
/*
* Check to see if the board failed any self tests.
*/
......@@ -426,6 +468,7 @@ int aac_rkt_init(struct aac_dev *dev)
dev->a_ops.adapter_notify = aac_rkt_notify_adapter;
dev->a_ops.adapter_sync_cmd = rkt_sync_cmd;
dev->a_ops.adapter_check_health = aac_rkt_check_health;
dev->a_ops.adapter_send = aac_rkt_send;
/*
 * First clear out all interrupts. Then enable the ones that we
......@@ -437,15 +480,24 @@ int aac_rkt_init(struct aac_dev *dev)
if (aac_init_adapter(dev) == NULL)
goto error_irq;
/*
* Start any kernel threads needed
*/
dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
if(dev->thread_pid < 0)
{
printk(KERN_ERR "aacraid: Unable to create rkt thread.\n");
goto error_kfree;
}
if (dev->new_comm_interface) {
/*
* FIB Setup has already been done, but we can minimize the
* damage by at least ensuring the OS never issues more
* commands than we can handle. The Rocket adapters currently
* can only handle 246 commands and 8 AIFs at the same time,
* and in fact do notify us accordingly if we negotiate the
* FIB size. The problem that causes us to add this check is
* to ensure that we do not overdo it with the adapter when a
* hard coded FIB override is being utilized. This special
* case warrants this half baked, but convenient, check here.
*/
if (dev->scsi_host_ptr->can_queue > (246 - AAC_NUM_MGT_FIB)) {
dev->init->MaxIoCommands = cpu_to_le32(246);
dev->scsi_host_ptr->can_queue = 246 - AAC_NUM_MGT_FIB;
}
rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
}
/*
* Tell the adapter that all is configured, and it can start
* accepting requests
......@@ -453,15 +505,11 @@ int aac_rkt_init(struct aac_dev *dev)
aac_rkt_start_adapter(dev);
return 0;
error_kfree:
kfree(dev->queues);
error_irq:
rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
free_irq(dev->scsi_host_ptr->irq, (void *)dev);
error_iounmap:
iounmap(dev->regs.rkt);
return -1;
}
......@@ -49,40 +49,57 @@
static irqreturn_t aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
{
struct aac_dev *dev = dev_id;
unsigned long bellbits;
u8 intstat, mask;
intstat = rx_readb(dev, MUnit.OISR);
/*
* Read mask and invert because drawbridge is reversed.
* This allows us to only service interrupts that have
* been enabled.
*/
mask = ~(dev->OIMR);
/* Check to see if this is our interrupt. If it isn't just return */
if (intstat & mask)
{
bellbits = rx_readl(dev, OutboundDoorbellReg);
if (bellbits & DoorBellPrintfReady) {
aac_printf(dev, rx_readl(dev, IndexRegs.Mailbox[5]));
rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
}
else if (bellbits & DoorBellAdapterNormCmdReady) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
}
else if (bellbits & DoorBellAdapterNormRespReady) {
aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
}
else if (bellbits & DoorBellAdapterNormCmdNotFull) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
dprintk((KERN_DEBUG "aac_rx_intr(%d,%p,%p)\n", irq, dev_id, regs));
if (dev->new_comm_interface) {
u32 Index = rx_readl(dev, MUnit.OutboundQueue);
if (Index == 0xFFFFFFFFL)
Index = rx_readl(dev, MUnit.OutboundQueue);
if (Index != 0xFFFFFFFFL) {
do {
if (aac_intr_normal(dev, Index)) {
rx_writel(dev, MUnit.OutboundQueue, Index);
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
}
Index = rx_readl(dev, MUnit.OutboundQueue);
} while (Index != 0xFFFFFFFFL);
return IRQ_HANDLED;
}
else if (bellbits & DoorBellAdapterNormRespNotFull) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
} else {
unsigned long bellbits;
u8 intstat;
intstat = rx_readb(dev, MUnit.OISR);
/*
* Read mask and invert because drawbridge is reversed.
* This allows us to only service interrupts that have
* been enabled.
* Check to see if this is our interrupt. If it isn't just return
*/
if (intstat & ~(dev->OIMR))
{
bellbits = rx_readl(dev, OutboundDoorbellReg);
if (bellbits & DoorBellPrintfReady) {
aac_printf(dev, rx_readl (dev, IndexRegs.Mailbox[5]));
rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
}
else if (bellbits & DoorBellAdapterNormCmdReady) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
}
else if (bellbits & DoorBellAdapterNormRespReady) {
rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
}
else if (bellbits & DoorBellAdapterNormCmdNotFull) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
}
else if (bellbits & DoorBellAdapterNormRespNotFull) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
}
return IRQ_HANDLED;
}
return IRQ_HANDLED;
}
return IRQ_NONE;
}
......@@ -173,7 +190,10 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
/*
* Restore interrupt mask even though we timed out
*/
rx_writeb(dev, MUnit.OIMR, dev->OIMR &= 0xfb);
if (dev->new_comm_interface)
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
else
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
return -ETIMEDOUT;
}
/*
......@@ -196,7 +216,10 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
/*
* Restore interrupt mask
*/
rx_writeb(dev, MUnit.OIMR, dev->OIMR &= 0xfb);
if (dev->new_comm_interface)
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
else
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
return 0;
}
......@@ -267,15 +290,6 @@ static void aac_rx_start_adapter(struct aac_dev *dev)
init = dev->init;
init->HostElapsedSeconds = cpu_to_le32(get_seconds());
/*
 * First clear out all interrupts. Then enable the ones that we
* can handle.
*/
rx_writeb(dev, MUnit.OIMR, 0xff);
rx_writel(dev, MUnit.ODR, 0xffffffff);
// rx_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
// We can only use a 32 bit address here
rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
......@@ -348,6 +362,39 @@ static int aac_rx_check_health(struct aac_dev *dev)
return 0;
}
/**
* aac_rx_send
* @fib: fib to issue
*
 * Will send a fib, returning 0 if successful, or non-zero if no inbound
 * queue entry is available.
*/
static int aac_rx_send(struct fib * fib)
{
u64 addr = fib->hw_fib_pa;
struct aac_dev *dev = fib->dev;
volatile void __iomem *device = dev->regs.rx;
u32 Index;
dprintk((KERN_DEBUG "%p->aac_rx_send(%p->%llx)\n", dev, fib, addr));
Index = rx_readl(dev, MUnit.InboundQueue);
if (Index == 0xFFFFFFFFL)
Index = rx_readl(dev, MUnit.InboundQueue);
dprintk((KERN_DEBUG "Index = 0x%x\n", Index));
if (Index == 0xFFFFFFFFL)
return Index;
device += Index;
dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff),
(u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size)));
writel((u32)(addr & 0xffffffff), device);
device += sizeof(u32);
writel((u32)(addr >> 32), device);
device += sizeof(u32);
writel(le16_to_cpu(fib->hw_fib->header.Size), device);
rx_writel(dev, MUnit.InboundQueue, Index);
dprintk((KERN_DEBUG "aac_rx_send - return 0\n"));
return 0;
}
/**
* aac_rx_init - initialize an i960 based AAC card
* @dev: device to configure
......@@ -368,13 +415,8 @@ int aac_rx_init(struct aac_dev *dev)
name = dev->name;
/*
* Map in the registers from the adapter.
* Check to see if the board panic'd while booting.
*/
if((dev->regs.rx = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
{
printk(KERN_WARNING "aacraid: unable to map i960.\n" );
return -1;
}
/*
* Check to see if the board failed any self tests.
*/
......@@ -426,6 +468,7 @@ int aac_rx_init(struct aac_dev *dev)
dev->a_ops.adapter_notify = aac_rx_notify_adapter;
dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
dev->a_ops.adapter_check_health = aac_rx_check_health;
dev->a_ops.adapter_send = aac_rx_send;
/*
 * First clear out all interrupts. Then enable the ones that we
......@@ -437,15 +480,9 @@ int aac_rx_init(struct aac_dev *dev)
if (aac_init_adapter(dev) == NULL)
goto error_irq;
/*
* Start any kernel threads needed
*/
dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
if(dev->thread_pid < 0)
{
printk(KERN_ERR "aacraid: Unable to create rx thread.\n");
goto error_kfree;
}
if (dev->new_comm_interface)
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
/*
* Tell the adapter that all is configured, and it can start
* accepting requests
......@@ -453,15 +490,11 @@ int aac_rx_init(struct aac_dev *dev)
aac_rx_start_adapter(dev);
return 0;
error_kfree:
kfree(dev->queues);
error_irq:
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
free_irq(dev->scsi_host_ptr->irq, (void *)dev);
error_iounmap:
iounmap(dev->regs.rx);
return -1;
}
......@@ -237,29 +237,16 @@ static void aac_sa_interrupt_adapter (struct aac_dev *dev)
static void aac_sa_start_adapter(struct aac_dev *dev)
{
u32 ret;
struct aac_init *init;
/*
* Fill in the remaining pieces of the init.
*/
init = dev->init;
init->HostElapsedSeconds = cpu_to_le32(get_seconds());
/*
* Tell the adapter we are back and up and running so it will scan its command
* queues and enable our interrupts
*/
dev->irq_mask = (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4);
/*
 * First clear out all interrupts. Then enable the ones that
* we can handle.
*/
sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
/* We can only use a 32 bit address here */
sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
&ret, NULL, NULL, NULL, NULL);
NULL, NULL, NULL, NULL, NULL);
}
/**
......@@ -313,15 +300,6 @@ int aac_sa_init(struct aac_dev *dev)
instance = dev->id;
name = dev->name;
/*
* Map in the registers from the adapter.
*/
if((dev->regs.sa = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
{
printk(KERN_WARNING "aacraid: unable to map ARM.\n" );
goto error_iounmap;
}
/*
* Check to see if the board failed any self tests.
*/
......@@ -377,15 +355,6 @@ int aac_sa_init(struct aac_dev *dev)
if(aac_init_adapter(dev) == NULL)
goto error_irq;
/*
* Start any kernel threads needed
*/
dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
if (dev->thread_pid < 0) {
printk(KERN_ERR "aacraid: Unable to create command thread.\n");
goto error_kfree;
}
/*
 * Tell the adapter that all is configured, and it can start
* accepting requests
......@@ -393,16 +362,11 @@ int aac_sa_init(struct aac_dev *dev)
aac_sa_start_adapter(dev);
return 0;
error_kfree:
kfree(dev->queues);
error_irq:
sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
free_irq(dev->scsi_host_ptr->irq, (void *)dev);
error_iounmap:
iounmap(dev->regs.sa);
return -1;
}
......
......@@ -2921,8 +2921,7 @@ static void disp_enintr(struct Scsi_Host *shpnt)
*/
static void show_command(Scsi_Cmnd *ptr)
{
printk(KERN_DEBUG "0x%08x: target=%d; lun=%d; cmnd=(",
(unsigned int) ptr, ptr->device->id, ptr->device->lun);
scmd_printk(KERN_DEBUG, ptr, "%p: cmnd=(", ptr);
__scsi_print_command(ptr->cmnd);
......
......@@ -1405,7 +1405,8 @@ static int aha1542_dev_reset(Scsi_Cmnd * SCpnt)
*/
aha1542_out(SCpnt->device->host->io_port, &ahacmd, 1);
printk(KERN_WARNING "aha1542.c: Trying device reset for target %d\n", SCpnt->device->id);
scmd_printk(KERN_WARNING, SCpnt,
"Trying device reset for target\n");
return SUCCESS;
......
......@@ -347,7 +347,7 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
{
unchar direction;
unchar *cmd = (unchar *) SCpnt->cmnd;
unchar target = SCpnt->device->id;
unchar target = scmd_id(SCpnt);
struct aha1740_hostdata *host = HOSTDATA(SCpnt->device->host);
unsigned long flags;
void *buff = SCpnt->request_buffer;
......
......@@ -52,6 +52,7 @@ static struct scsi_transport_template *ahd_linux_transport_template = NULL;
#include <linux/mm.h> /* For fetching system memory size */
#include <linux/blkdev.h> /* For block_size() */
#include <linux/delay.h> /* For ssleep/msleep */
#include <linux/device.h>
/*
* Bucket size for counting good commands in between bad ones.
......@@ -397,7 +398,7 @@ ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
/******************************** Macros **************************************/
#define BUILD_SCSIID(ahd, cmd) \
((((cmd)->device->id << TID_SHIFT) & TID) | (ahd)->our_id)
(((scmd_id(cmd) << TID_SHIFT) & TID) | (ahd)->our_id)
/*
* Return a string describing the driver.
......@@ -565,7 +566,7 @@ ahd_linux_slave_configure(struct scsi_device *sdev)
ahd = *((struct ahd_softc **)sdev->host->hostdata);
if (bootverbose)
printf("%s: Slave Configure %d\n", ahd_name(ahd), sdev->id);
sdev_printk(KERN_INFO, sdev, "Slave Configure\n");
ahd_linux_device_queue_depth(sdev);
......@@ -684,7 +685,7 @@ ahd_linux_bus_reset(struct scsi_cmnd *cmd)
ahd_name(ahd), cmd);
#endif
ahd_lock(ahd, &s);
found = ahd_reset_channel(ahd, cmd->device->channel + 'A',
found = ahd_reset_channel(ahd, scmd_channel(cmd) + 'A',
/*initiate reset*/TRUE);
ahd_unlock(ahd, &s);
......@@ -2067,9 +2068,8 @@ ahd_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
wait = FALSE;
ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
printf("%s:%d:%d:%d: Attempting to queue a%s message:",
ahd_name(ahd), cmd->device->channel,
cmd->device->id, cmd->device->lun,
scmd_printk(KERN_INFO, cmd,
"Attempting to queue a%s message:",
flag == SCB_ABORT ? "n ABORT" : " TARGET RESET");
printf("CDB:");
......@@ -2093,9 +2093,7 @@ ahd_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
* No target device for this command exists,
* so we must not still own the command.
*/
printf("%s:%d:%d:%d: Is not an active device\n",
ahd_name(ahd), cmd->device->channel, cmd->device->id,
cmd->device->lun);
scmd_printk(KERN_INFO, cmd, "Is not an active device\n");
retval = SUCCESS;
goto no_cmd;
}
......@@ -2112,8 +2110,9 @@ ahd_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
/* Any SCB for this device will do for a target reset */
LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
if (ahd_match_scb(ahd, pending_scb, cmd->device->id,
cmd->device->channel + 'A',
if (ahd_match_scb(ahd, pending_scb,
scmd_id(cmd),
scmd_channel(cmd) + 'A',
CAM_LUN_WILDCARD,
SCB_LIST_NULL, ROLE_INITIATOR) == 0)
break;
......@@ -2121,9 +2120,7 @@ ahd_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
}
if (pending_scb == NULL) {
printf("%s:%d:%d:%d: Command not found\n",
ahd_name(ahd), cmd->device->channel, cmd->device->id,
cmd->device->lun);
scmd_printk(KERN_INFO, cmd, "Command not found\n");
goto no_cmd;
}
......@@ -2146,9 +2143,7 @@ ahd_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
paused = TRUE;
if ((pending_scb->flags & SCB_ACTIVE) == 0) {
printf("%s:%d:%d:%d: Command already completed\n",
ahd_name(ahd), cmd->device->channel, cmd->device->id,
cmd->device->lun);
scmd_printk(KERN_INFO, cmd, "Command already completed\n");
goto no_cmd;
}
......@@ -2204,7 +2199,7 @@ ahd_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
if (last_phase != P_BUSFREE
&& (SCB_GET_TAG(pending_scb) == active_scbptr
|| (flag == SCB_DEVICE_RESET
&& SCSIID_TARGET(ahd, saved_scsiid) == cmd->device->id))) {
&& SCSIID_TARGET(ahd, saved_scsiid) == scmd_id(cmd)))) {
/*
* We're active on the bus, so assert ATN
......@@ -2214,9 +2209,7 @@ ahd_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
pending_scb->flags |= SCB_RECOVERY_SCB|flag;
ahd_outb(ahd, MSG_OUT, HOST_MSG);
ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
printf("%s:%d:%d:%d: Device is active, asserting ATN\n",
ahd_name(ahd), cmd->device->channel,
cmd->device->id, cmd->device->lun);
scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n");
wait = TRUE;
} else if (disconnected) {
......@@ -2277,9 +2270,7 @@ ahd_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
printf("Device is disconnected, re-queuing SCB\n");
wait = TRUE;
} else {
printf("%s:%d:%d:%d: Unable to deliver message\n",
ahd_name(ahd), cmd->device->channel,
cmd->device->id, cmd->device->lun);
scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n");
retval = FAILED;
goto done;
}
......
......@@ -641,7 +641,7 @@ ahc_linux_slave_configure(struct scsi_device *sdev)
ahc = *((struct ahc_softc **)sdev->host->hostdata);
if (bootverbose)
printf("%s: Slave Configure %d\n", ahc_name(ahc), sdev->id);
sdev_printk(KERN_INFO, sdev, "Slave Configure\n");
ahc_linux_device_queue_depth(sdev);
......@@ -686,7 +686,7 @@ ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
u_int channel;
ahc = *((struct ahc_softc **)sdev->host->hostdata);
channel = sdev->channel;
channel = sdev_channel(sdev);
bh = scsi_bios_ptable(bdev);
if (bh) {
......@@ -759,7 +759,7 @@ ahc_linux_bus_reset(struct scsi_cmnd *cmd)
ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
ahc_lock(ahc, &flags);
found = ahc_reset_channel(ahc, cmd->device->channel + 'A',
found = ahc_reset_channel(ahc, scmd_channel(cmd) + 'A',
/*initiate reset*/TRUE);
ahc_unlock(ahc, &flags);
......@@ -2117,9 +2117,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
wait = FALSE;
ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
printf("%s:%d:%d:%d: Attempting to queue a%s message\n",
ahc_name(ahc), cmd->device->channel,
cmd->device->id, cmd->device->lun,
scmd_printk(KERN_INFO, cmd, "Attempting to queue a%s message\n",
flag == SCB_ABORT ? "n ABORT" : " TARGET RESET");
printf("CDB:");
......@@ -2174,8 +2172,8 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
/* Any SCB for this device will do for a target reset */
LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
if (ahc_match_scb(ahc, pending_scb, cmd->device->id,
cmd->device->channel + 'A',
if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd),
scmd_channel(cmd) + 'A',
CAM_LUN_WILDCARD,
SCB_LIST_NULL, ROLE_INITIATOR) == 0)
break;
......@@ -2183,9 +2181,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
}
if (pending_scb == NULL) {
printf("%s:%d:%d:%d: Command not found\n",
ahc_name(ahc), cmd->device->channel, cmd->device->id,
cmd->device->lun);
scmd_printk(KERN_INFO, cmd, "Command not found\n");
goto no_cmd;
}
......@@ -2207,9 +2203,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
paused = TRUE;
if ((pending_scb->flags & SCB_ACTIVE) == 0) {
printf("%s:%d:%d:%d: Command already completed\n",
ahc_name(ahc), cmd->device->channel, cmd->device->id,
cmd->device->lun);
scmd_printk(KERN_INFO, cmd, "Command already completed\n");
goto no_cmd;
}
......@@ -2266,7 +2260,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
if (last_phase != P_BUSFREE
&& (pending_scb->hscb->tag == active_scb_index
|| (flag == SCB_DEVICE_RESET
&& SCSIID_TARGET(ahc, saved_scsiid) == cmd->device->id))) {
&& SCSIID_TARGET(ahc, saved_scsiid) == scmd_id(cmd)))) {
/*
* We're active on the bus, so assert ATN
......@@ -2276,9 +2270,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
pending_scb->flags |= SCB_RECOVERY_SCB|flag;
ahc_outb(ahc, MSG_OUT, HOST_MSG);
ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
printf("%s:%d:%d:%d: Device is active, asserting ATN\n",
ahc_name(ahc), cmd->device->channel, cmd->device->id,
cmd->device->lun);
scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n");
wait = TRUE;
} else if (disconnected) {
......@@ -2344,9 +2336,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
printf("Device is disconnected, re-queuing SCB\n");
wait = TRUE;
} else {
printf("%s:%d:%d:%d: Unable to deliver message\n",
ahc_name(ahc), cmd->device->channel, cmd->device->id,
cmd->device->lun);
scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n");
retval = FAILED;
goto done;
}
......
......@@ -297,11 +297,10 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id, struct pt_regs *re
}
workreq = dev->id[c][target_id].curr_req;
#ifdef ED_DBGP
printk(KERN_DEBUG "Channel = %d ID = %d LUN = %d CDB",c,workreq->device->id,workreq->device->lun);
for(l=0;l<workreq->cmd_len;l++)
{
scmd_printk(KERN_DEBUG, workreq, "CDB");
for (l = 0; l < workreq->cmd_len; l++)
printk(KERN_DEBUG " %x",workreq->cmnd[l]);
}
printk("\n");
#endif
tmport = workport + 0x0f;
......@@ -622,10 +621,10 @@ static int atp870u_queuecommand(struct scsi_cmnd * req_p,
struct atp_unit *dev;
struct Scsi_Host *host;
c = req_p->device->channel;
c = scmd_channel(req_p);
req_p->sense_buffer[0]=0;
req_p->resid = 0;
if (req_p->device->channel > 1) {
if (scmd_channel(req_p) > 1) {
req_p->result = 0x00040000;
done(req_p);
#ifdef ED_DBGP
......@@ -640,7 +639,7 @@ static int atp870u_queuecommand(struct scsi_cmnd * req_p,
m = 1;
m = m << req_p->device->id;
m = m << scmd_id(req_p);
/*
* Fake a timeout for missing targets
......@@ -758,9 +757,9 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
dev->quhd[c] = 0;
}
workreq = dev->quereq[c][dev->quhd[c]];
if (dev->id[c][workreq->device->id].curr_req == 0) {
dev->id[c][workreq->device->id].curr_req = workreq;
dev->last_cmd[c] = workreq->device->id;
if (dev->id[c][scmd_id(workreq)].curr_req == 0) {
dev->id[c][scmd_id(workreq)].curr_req = workreq;
dev->last_cmd[c] = scmd_id(workreq);
goto cmd_subp;
}
dev->quhd[c] = j;
......@@ -787,16 +786,16 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
oktosend:
#ifdef ED_DBGP
printk("OK to Send\n");
printk("CDB");
scmd_printk(KERN_DEBUG, workreq, "CDB");
for(i=0;i<workreq->cmd_len;i++) {
printk(" %x",workreq->cmnd[i]);
}
printk("\nChannel = %d ID = %d LUN = %d\n",c,workreq->device->id,workreq->device->lun);
printk("\n");
#endif
if (dev->dev_id == ATP885_DEVID) {
j = inb(dev->baseport + 0x29) & 0xfe;
outb(j, dev->baseport + 0x29);
dev->r1f[c][workreq->device->id] = 0;
dev->r1f[c][scmd_id(workreq)] = 0;
}
if (workreq->cmnd[0] == READ_CAPACITY) {
......@@ -810,7 +809,7 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
tmport = workport + 0x1b;
j = 0;
target_id = workreq->device->id;
target_id = scmd_id(workreq);
/*
* Wide ?
......@@ -3109,7 +3108,7 @@ static int atp870u_abort(struct scsi_cmnd * SCpnt)
host = SCpnt->device->host;
dev = (struct atp_unit *)&host->hostdata;
c=SCpnt->device->channel;
c = scmd_channel(SCpnt);
printk(" atp870u: abort Channel = %x \n", c);
printk("working=%x last_cmd=%x ", dev->working[c], dev->last_cmd[c]);
printk(" quhdu=%x quendu=%x ", dev->quhd[c], dev->quend[c]);
......
......@@ -940,9 +940,7 @@ static int ch_probe(struct device *dev)
MKDEV(SCSI_CHANGER_MAJOR,ch->minor),
dev, "s%s", ch->name);
printk(KERN_INFO "Attached scsi changer %s "
"at scsi%d, channel %d, id %d, lun %d\n",
ch->name, sd->host->host_no, sd->channel, sd->id, sd->lun);
sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
spin_lock(&ch_devlist_lock);
list_add_tail(&ch->list,&ch_devlist);
......
......@@ -1389,10 +1389,7 @@ EXPORT_SYMBOL(scsi_print_msg);
void scsi_print_command(struct scsi_cmnd *cmd)
{
/* Assume appended output (i.e. not at start of line) */
printk("scsi%d : destination target %d, lun %d\n",
cmd->device->host->host_no,
cmd->device->id,
cmd->device->lun);
sdev_printk("", cmd->device, "\n");
printk(KERN_INFO " command: ");
scsi_print_cdb(cmd->cmnd, cmd->cmd_len, 0);
}
......
#ifndef CPQFCTS_H
#define CPQFCTS_H
#include "cpqfcTSstructs.h"
// These functions are required by the Linux SCSI layers
extern int cpqfcTS_detect(Scsi_Host_Template *);
extern int cpqfcTS_release(struct Scsi_Host *);
extern const char * cpqfcTS_info(struct Scsi_Host *);
extern int cpqfcTS_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
extern int cpqfcTS_queuecommand(Scsi_Cmnd *, void (* done)(Scsi_Cmnd *));
extern int cpqfcTS_abort(Scsi_Cmnd *);
extern int cpqfcTS_reset(Scsi_Cmnd *, unsigned int);
extern int cpqfcTS_eh_abort(Scsi_Cmnd *Cmnd);
extern int cpqfcTS_eh_device_reset(Scsi_Cmnd *);
extern int cpqfcTS_biosparam(struct scsi_device *, struct block_device *,
sector_t, int[]);
extern int cpqfcTS_ioctl( Scsi_Device *ScsiDev, int Cmnd, void *arg);
#endif /* CPQFCTS_H */
/* Copyright(c) 2000, Compaq Computer Corporation
* Fibre Channel Host Bus Adapter
* 64-bit, 66MHz PCI
* Originally developed and tested on:
* (front): [chip] Tachyon TS HPFC-5166A/1.2 L2C1090 ...
* SP# P225CXCBFIEL6T, Rev XC
* SP# 161290-001, Rev XD
* (back): Board No. 010008-001 A/W Rev X5, FAB REV X5
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* Written by Don Zimmerman
*/
#ifndef CPQFCTSCHIP_H
#define CPQFCTSCHIP_H
#ifndef TACHYON_CHIP_INC
// FC-PH (Physical) specification levels for Login payloads
// NOTE: These are NOT strictly complied with by any FC vendors
#define FC_PH42 0x08
#define FC_PH43 0x09
#define FC_PH3 0x20
#define TACHLITE_TS_RX_SIZE 1024 // max inbound frame size
// "I" prefix is for Include
#define IVENDID 0x00 // word
#define IDEVID 0x02
#define ITLCFGCMD 0x04
#define IMEMBASE 0x18 // Tachyon
#define ITLMEMBASE 0x1C // Tachlite
#define IIOBASEL 0x10 // Tachyon I/O base address, lower 256 bytes
#define IIOBASEU 0x14 // Tachyon I/O base address, upper 256 bytes
#define ITLIOBASEL 0x14 // TachLite I/O base address, lower 256 bytes
#define ITLIOBASEU 0x18 // TachLite I/O base address, upper 256 bytes
#define ITLRAMBASE 0x20 // TL on-board RAM start
#define ISROMBASE 0x24
#define IROMBASE 0x30
#define ICFGCMD 0x04 // PCI config - PCI config access (word)
#define ICFGSTAT 0x06 // PCI status (R - word)
#define IRCTR_WCTR 0x1F2 // ROM control / pre-fetch wait counter
#define IPCIMCTR 0x1F3 // PCI master control register
#define IINTPEND 0x1FD // Interrupt pending (I/O Upper - Tachyon & TL)
#define IINTEN 0x1FE // Interrupt enable (I/O Upper - Tachyon & TL)
#define IINTSTAT 0x1FF // Interrupt status (I/O Upper - Tachyon & TL)
#define IMQ_BASE 0x80
#define IMQ_LENGTH 0x84
#define IMQ_CONSUMER_INDEX 0x88
#define IMQ_PRODUCER_INDEX 0x8C // Tach copies its INDX to bits 0-7 of value
/*
// IOBASE UPPER
#define SFSBQ_BASE 0x00 // single-frame sequences
#define SFSBQ_LENGTH 0x04
#define SFSBQ_PRODUCER_INDEX 0x08
#define SFSBQ_CONSUMER_INDEX 0x0C // (R)
#define SFS_BUFFER_LENGTH 0X10
// SCSI-FCP hardware assists
#define SEST_BASE 0x40 // SCSI Exchange State Table
#define SEST_LENGTH 0x44
#define SCSI_BUFFER_LENGTH 0x48
#define SEST_LINKED_LIST 0x4C
#define TACHYON_My_ID 0x6C
#define TACHYON_CONFIGURATION 0x84 // (R/W) reset val 2
#define TACHYON_CONTROL 0x88
#define TACHYON_STATUS 0x8C // (R)
#define TACHYON_FLUSH_SEST 0x90 // (R/W)
#define TACHYON_EE_CREDIT_TMR 0x94 // (R)
#define TACHYON_BB_CREDIT_TMR 0x98 // (R)
#define TACHYON_RCV_FRAME_ERR 0x9C // (R)
#define FRAME_MANAGER_CONFIG 0xC0 // (R/W)
#define FRAME_MANAGER_CONTROL 0xC4
#define FRAME_MANAGER_STATUS 0xC8 // (R)
#define FRAME_MANAGER_ED_TOV 0xCC
#define FRAME_MANAGER_LINK_ERR1 0xD0 // (R)
#define FRAME_MANAGER_LINK_ERR2 0xD4 // (R)
#define FRAME_MANAGER_TIMEOUT2 0xD8 // (W)
#define FRAME_MANAGER_BB_CREDIT 0xDC // (R)
#define FRAME_MANAGER_WWN_HI 0xE0 // (R/W)
#define FRAME_MANAGER_WWN_LO 0xE4 // (R/W)
#define FRAME_MANAGER_RCV_AL_PA 0xE8 // (R)
#define FRAME_MANAGER_PRIMITIVE 0xEC // {K28.5} byte1 byte2 byte3
*/
#define TL_MEM_ERQ_BASE 0x0 //ERQ Base
#define TL_IO_ERQ_BASE 0x0 //ERQ base
#define TL_MEM_ERQ_LENGTH 0x4 //ERQ Length
#define TL_IO_ERQ_LENGTH 0x4 //ERQ Length
#define TL_MEM_ERQ_PRODUCER_INDEX 0x8 //ERQ Producer Index register
#define TL_IO_ERQ_PRODUCER_INDEX 0x8 //ERQ Producer Index register
#define TL_MEM_ERQ_CONSUMER_INDEX_ADR 0xC //ERQ Consumer Index address register
#define TL_IO_ERQ_CONSUMER_INDEX_ADR 0xC //ERQ Consumer Index address register
#define TL_MEM_ERQ_CONSUMER_INDEX 0xC //ERQ Consumer Index
#define TL_IO_ERQ_CONSUMER_INDEX 0xC //ERQ Consumer Index
#define TL_MEM_SFQ_BASE 0x50 //SFQ Base
#define TL_IO_SFQ_BASE 0x50 //SFQ base
#define TL_MEM_SFQ_LENGTH 0x54 //SFQ Length
#define TL_IO_SFQ_LENGTH 0x54 //SFQ Length
#define TL_MEM_SFQ_CONSUMER_INDEX 0x58 //SFQ Consumer Index
#define TL_IO_SFQ_CONSUMER_INDEX 0x58 //SFQ Consumer Index
#define TL_MEM_IMQ_BASE 0x80 //IMQ Base
#define TL_IO_IMQ_BASE 0x80 //IMQ base
#define TL_MEM_IMQ_LENGTH 0x84 //IMQ Length
#define TL_IO_IMQ_LENGTH 0x84 //IMQ Length
#define TL_MEM_IMQ_CONSUMER_INDEX 0x88 //IMQ Consumer Index
#define TL_IO_IMQ_CONSUMER_INDEX 0x88 //IMQ Consumer Index
#define TL_MEM_IMQ_PRODUCER_INDEX_ADR 0x8C //IMQ Producer Index address register
#define TL_IO_IMQ_PRODUCER_INDEX_ADR 0x8C //IMQ Producer Index address register
#define TL_MEM_SEST_BASE 0x140 //SFQ Base
#define TL_IO_SEST_BASE 0x40 //SFQ base
#define TL_MEM_SEST_LENGTH 0x144 //SFQ Length
#define TL_IO_SEST_LENGTH 0x44 //SFQ Length
#define TL_MEM_SEST_LINKED_LIST 0x14C
#define TL_MEM_SEST_SG_PAGE 0x168 // Extended Scatter/Gather page size
#define TL_MEM_TACH_My_ID 0x16C
#define TL_IO_TACH_My_ID 0x6C //My AL_PA ID
#define TL_MEM_TACH_CONFIG 0x184 //Tachlite Configuration register
#define TL_IO_CONFIG 0x84 //Tachlite Configuration register
#define TL_MEM_TACH_CONTROL 0x188 //Tachlite Control register
#define TL_IO_CTR 0x88 //Tachlite Control register
#define TL_MEM_TACH_STATUS 0x18C //Tachlite Status register
#define TL_IO_STAT 0x8C //Tachlite Status register
#define TL_MEM_FM_CONFIG 0x1C0 //Frame Manager Configuration register
#define TL_IO_FM_CONFIG 0xC0 //Frame Manager Configuration register
#define TL_MEM_FM_CONTROL 0x1C4 //Frame Manager Control
#define TL_IO_FM_CTL 0xC4 //Frame Manager Control
#define TL_MEM_FM_STATUS 0x1C8 //Frame Manager Status
#define TL_IO_FM_STAT 0xC8 //Frame Manager Status
#define TL_MEM_FM_LINK_STAT1 0x1D0 //Frame Manager Link Status 1
#define TL_IO_FM_LINK_STAT1 0xD0 //Frame Manager Link Status 1
#define TL_MEM_FM_LINK_STAT2 0x1D4 //Frame Manager Link Status 2
#define TL_IO_FM_LINK_STAT2 0xD4 //Frame Manager Link Status 2
#define TL_MEM_FM_TIMEOUT2 0x1D8 // (W)
#define TL_MEM_FM_BB_CREDIT0 0x1DC
#define TL_MEM_FM_WWN_HI 0x1E0 //Frame Manager World Wide Name High
#define TL_IO_FM_WWN_HI 0xE0 //Frame Manager World Wide Name High
#define TL_MEM_FM_WWN_LO 0x1E4 //Frame Manager World Wide Name LOW
#define TL_IO_FM_WWN_LO 0xE4 //Frame Manager World Wide Name Low
#define TL_MEM_FM_RCV_AL_PA 0x1E8 //Frame Manager AL_PA Received register
#define TL_IO_FM_ALPA 0xE8 //Frame Manager AL_PA Received register
#define TL_MEM_FM_ED_TOV 0x1CC
#define TL_IO_ROMCTR 0xFA //TL PCI ROM Control Register
#define TL_IO_PCIMCTR 0xFB //TL PCI Master Control Register
#define TL_IO_SOFTRST 0xFC //Tachlite Configuration register
#define TL_MEM_SOFTRST 0x1FC //Tachlite Configuration register
// completion message types (bit 8 set means Interrupt generated)
// CM_Type
#define OUTBOUND_COMPLETION 0
#define ERROR_IDLE_COMPLETION 0x01
#define OUT_HI_PRI_COMPLETION 0x01
#define INBOUND_MFS_COMPLETION 0x02
#define INBOUND_000_COMPLETION 0x03
#define INBOUND_SFS_COMPLETION 0x04 // Tachyon & TachLite
#define ERQ_FROZEN_COMPLETION 0x06 // TachLite
#define INBOUND_C1_TIMEOUT 0x05
#define INBOUND_BUSIED_FRAME 0x06
#define SFS_BUF_WARN 0x07
#define FCP_FROZEN_COMPLETION 0x07 // TachLite
#define MFS_BUF_WARN 0x08
#define IMQ_BUF_WARN 0x09
#define FRAME_MGR_INTERRUPT 0x0A
#define READ_STATUS 0x0B
#define INBOUND_SCSI_DATA_COMPLETION 0x0C
#define INBOUND_FCP_XCHG_COMPLETION 0x0C // TachLite
#define INBOUND_SCSI_DATA_COMMAND 0x0D
#define BAD_SCSI_FRAME 0x0E
#define INB_SCSI_STATUS_COMPLETION 0x0F
#define BUFFER_PROCESSED_COMPLETION 0x11
// FC-AL (Tachyon) Loop Port State Machine defs
// (loop "Up" states)
#define MONITORING 0x0
#define ARBITRATING 0x1
#define ARBITRAT_WON 0x2
#define OPEN 0x3
#define OPENED 0x4
#define XMITTD_CLOSE 0x5
#define RCVD_CLOSE 0x6
#define TRANSFER 0x7
// (loop "Down" states)
#define INITIALIZING 0x8
#define O_I_INIT 0x9
#define O_I_PROTOCOL 0xa
#define O_I_LIP_RCVD 0xb
#define HOST_CONTROL 0xc
#define LOOP_FAIL 0xd
// (no 0xe)
#define OLD_PORT 0xf
#define TACHYON_CHIP_INC
#endif
#endif /* CPQFCTSCHIP_H */
This diff has been collapsed.
/* Copyright(c) 2000, Compaq Computer Corporation
* Fibre Channel Host Bus Adapter
* 64-bit, 66MHz PCI
* Originally developed and tested on:
* (front): [chip] Tachyon TS HPFC-5166A/1.2 L2C1090 ...
* SP# P225CXCBFIEL6T, Rev XC
* SP# 161290-001, Rev XD
* (back): Board No. 010008-001 A/W Rev X5, FAB REV X5
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* Written by Don Zimmerman
*/
// These functions control the NVRAM I2C hardware on
// non-intelligent Fibre Host Adapters.
// The primary purpose is to read the HBA's NVRAM to get the adapter's
// manufactured WWN to copy into the Tachyon chip registers
// Original source author unknown
#include <linux/types.h>
enum boolean { FALSE, TRUE } ;
#ifndef UCHAR
typedef __u8 UCHAR;
#endif
#ifndef BOOLEAN
typedef __u8 BOOLEAN;
#endif
#ifndef USHORT
typedef __u16 USHORT;
#endif
#ifndef ULONG
typedef __u32 ULONG;
#endif
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <asm/io.h> // struct pt_regs for IRQ handler & Port I/O
#include "cpqfcTSchip.h"
static void tl_i2c_tx_byte( void* GPIOout, UCHAR data );
/*static BOOLEAN tl_write_i2c_page_portion( void* GPIOin, void* GPIOout,
USHORT startOffset, // e.g. 0x2f for WWN start
USHORT count,
UCHAR *buf );
*/
//
// Tachlite GPIO2, GPIO3 (I2C) DEFINES
// The NVRAM chip NM24C03 defines SCL (serial clock) and SDA (serial data)
// GPIO2 drives SDA, and GPIO3 drives SCL
//
// Since Tachlite inverts the state of the GPIO 0-3 outputs, SET writes 0
// and clear writes 1. The input lines (read in TL status) are NOT inverted.
// This really helps confuse the code and debugging.
#define SET_DATA_HI 0x0
#define SET_DATA_LO 0x8
#define SET_CLOCK_HI 0x0
#define SET_CLOCK_LO 0x4
#define SENSE_DATA_HI 0x8
#define SENSE_DATA_LO 0x0
#define SENSE_CLOCK_HI 0x4
#define SENSE_CLOCK_LO 0x0
#define SLAVE_READ_ADDRESS 0xA1
#define SLAVE_WRITE_ADDRESS 0xA0
static void i2c_delay(ULONG mstime);
static void tl_i2c_clock_pulse( UCHAR , void* GPIOout);
static UCHAR tl_read_i2c_data( void* );
//-----------------------------------------------------------------------------
//
// Name: I2C_RX_ACK
//
// This routine receives an acknowledge over the I2C bus.
//
//-----------------------------------------------------------------------------
static unsigned short tl_i2c_rx_ack( void* GPIOin, void* GPIOout )
{
unsigned long value;
// do clock pulse, let data line float high
tl_i2c_clock_pulse( SET_DATA_HI, GPIOout );
// slave must drive data low for acknowledge
value = tl_read_i2c_data( GPIOin);
if (value & SENSE_DATA_HI )
return( FALSE );
return( TRUE );
}
//-----------------------------------------------------------------------------
//
// Name: READ_I2C_REG
//
// This routine reads the I2C control register using the global
// IO address stored in gpioreg.
//
//-----------------------------------------------------------------------------
static UCHAR tl_read_i2c_data( void* gpioreg )
{
return( (UCHAR)(readl( gpioreg ) & 0x08L) ); // GPIO3
}
//-----------------------------------------------------------------------------
//
// Name: WRITE_I2C_REG
//
// This routine writes the I2C control register using the global
// IO address stored in gpioreg.
// In Tachlite, we don't want to modify other bits in TL Control reg.
//
//-----------------------------------------------------------------------------
static void tl_write_i2c_reg( void* gpioregOUT, UCHAR value )
{
ULONG temp;
// First read the register and clear out the old bits
temp = readl( gpioregOUT ) & 0xfffffff3L;
// Now or in the new data and send it back out
writel( temp | value, gpioregOUT);
}
//-----------------------------------------------------------------------------
//
// Name: I2C_TX_START
//
// This routine transmits a start condition over the I2C bus.
// 1. Set SCL (clock, GPIO2) HIGH, set SDA (data, GPIO3) HIGH,
// wait 5us to stabilize.
// 2. With SCL still HIGH, drive SDA low. The low transition marks
// the start condition to NM24Cxx (the chip)
// NOTE! In TL control reg., output 1 means chip sees LOW
//
//-----------------------------------------------------------------------------
static unsigned short tl_i2c_tx_start( void* GPIOin, void* GPIOout )
{
unsigned short i;
ULONG value;
if ( !(tl_read_i2c_data(GPIOin) & SENSE_DATA_HI))
{
// start with clock high, let data float high
tl_write_i2c_reg( GPIOout, SET_DATA_HI | SET_CLOCK_HI );
// keep sending clock pulses if slave is driving data line
for (i = 0; i < 10; i++)
{
tl_i2c_clock_pulse( SET_DATA_HI, GPIOout );
if ( tl_read_i2c_data(GPIOin) & SENSE_DATA_HI )
break;
}
// if the slave is still driving data low after 10 clocks, abort
value = tl_read_i2c_data( GPIOin ); // read status
if (!(value & 0x08) )
return( FALSE );
}
// To START, bring data low while clock high
tl_write_i2c_reg( GPIOout, SET_CLOCK_HI | SET_DATA_LO );
i2c_delay(0);
return( TRUE ); // TX start successful
}
//-----------------------------------------------------------------------------
//
// Name: I2C_TX_STOP
//
// This routine transmits a stop condition over the I2C bus.
//
//-----------------------------------------------------------------------------
static unsigned short tl_i2c_tx_stop( void* GPIOin, void* GPIOout )
{
int i;
for (i = 0; i < 10; i++)
{
// Send clock pulse, drive data line low
tl_i2c_clock_pulse( SET_DATA_LO, GPIOout );
// To STOP, bring data high while clock high
tl_write_i2c_reg( GPIOout, SET_DATA_HI | SET_CLOCK_HI );
// Give the data line time to float high
i2c_delay(0);
// If slave is driving data line low, there's a problem; retry
if ( tl_read_i2c_data(GPIOin) & SENSE_DATA_HI )
return( TRUE ); // TX STOP successful!
}
return( FALSE ); // error
}
//-----------------------------------------------------------------------------
//
// Name: I2C_TX_uchar
//
// This routine transmits a byte across the I2C bus.
//
//-----------------------------------------------------------------------------
static void tl_i2c_tx_byte( void* GPIOout, UCHAR data )
{
UCHAR bit;
for (bit = 0x80; bit; bit >>= 1)
{
if( data & bit )
tl_i2c_clock_pulse( (UCHAR)SET_DATA_HI, GPIOout);
else
tl_i2c_clock_pulse( (UCHAR)SET_DATA_LO, GPIOout);
}
}
//-----------------------------------------------------------------------------
//
// Name: I2C_RX_uchar
//
// This routine receives a byte across the I2C bus.
//
//-----------------------------------------------------------------------------
static UCHAR tl_i2c_rx_byte( void* GPIOin, void* GPIOout )
{
UCHAR bit;
UCHAR data = 0;
for (bit = 0x80; bit; bit >>= 1) {
// do clock pulse, let data line float high
tl_i2c_clock_pulse( SET_DATA_HI, GPIOout );
// read data line
if ( tl_read_i2c_data( GPIOin) & 0x08 )
data |= bit;
}
return (data);
}
//*****************************************************************************
//*****************************************************************************
// Function: read_i2c_nvram
// Arguments: UCHAR count number of bytes to read
// UCHAR *buf area to store the bytes read
// Returns: 0 - failed
// 1 - success
//*****************************************************************************
//*****************************************************************************
unsigned long cpqfcTS_ReadNVRAM( void* GPIOin, void* GPIOout , USHORT count,
UCHAR *buf )
{
unsigned short i;
if( !( tl_i2c_tx_start(GPIOin, GPIOout) ))
return FALSE;
// Select the NVRAM for "dummy" write, to set the address
tl_i2c_tx_byte( GPIOout , SLAVE_WRITE_ADDRESS );
if ( !tl_i2c_rx_ack(GPIOin, GPIOout ) )
return( FALSE );
// Now send the address where we want to start reading
tl_i2c_tx_byte( GPIOout , 0 );
if ( !tl_i2c_rx_ack(GPIOin, GPIOout ) )
return( FALSE );
// Send a repeated start condition and select the
// slave for reading now.
if( tl_i2c_tx_start(GPIOin, GPIOout) )
tl_i2c_tx_byte( GPIOout, SLAVE_READ_ADDRESS );
if ( !tl_i2c_rx_ack(GPIOin, GPIOout) )
return( FALSE );
// this loop will now read out the data and store it
// in the buffer pointed to by buf
for ( i=0; i<count; i++)
{
*buf++ = tl_i2c_rx_byte(GPIOin, GPIOout);
// Send ACK by holding data line low for 1 clock
if ( i < (count-1) )
tl_i2c_clock_pulse( 0x08, GPIOout );
else {
// Don't send ack for final byte
tl_i2c_clock_pulse( SET_DATA_HI, GPIOout );
}
}
tl_i2c_tx_stop(GPIOin, GPIOout);
return( TRUE );
}
//****************************************************************
//
//
//
// routines to set and clear the data and clock bits
//
//
//
//****************************************************************
static void tl_set_clock(void* gpioreg)
{
ULONG ret_val;
ret_val = readl( gpioreg );
ret_val &= 0xffffffFBL; // clear GPIO2 (SCL)
writel( ret_val, gpioreg);
}
static void tl_clr_clock(void* gpioreg)
{
ULONG ret_val;
ret_val = readl( gpioreg );
ret_val |= SET_CLOCK_LO;
writel( ret_val, gpioreg);
}
//*****************************************************************
//
//
// This routine will advance the clock by one period
//
//
//*****************************************************************
static void tl_i2c_clock_pulse( UCHAR value, void* GPIOout )
{
ULONG ret_val;
// clear the clock bit
tl_clr_clock( GPIOout );
i2c_delay(0);
// read the port to preserve non-I2C bits
ret_val = readl( GPIOout );
// clear the data & clock bits
ret_val &= 0xFFFFFFf3;
// write the value passed in...
// data can only change while clock is LOW!
ret_val |= value; // the data
ret_val |= SET_CLOCK_LO; // the clock
writel( ret_val, GPIOout );
i2c_delay(0);
//set clock bit
tl_set_clock( GPIOout);
}
//*****************************************************************
//
//
// This routine returns the 64-bit WWN
//
//
//*****************************************************************
int cpqfcTS_GetNVRAM_data( UCHAR *wwnbuf, UCHAR *buf )
{
ULONG len;
ULONG sub_len;
ULONG ptr_inc;
ULONG i;
ULONG j;
UCHAR *data_ptr;
UCHAR z;
UCHAR name;
UCHAR sub_name;
UCHAR done;
int iReturn=0; // def. 0 offset is failure to find WWN field
data_ptr = (UCHAR *)buf;
done = FALSE;
i = 0;
while ( (i < 128) && (!done) )
{
z = data_ptr[i];
if ( !(z & 0x80) )
{
len = 1 + (z & 0x07);
name = (z & 0x78) >> 3;
if (name == 0x0F)
done = TRUE;
}
else
{
name = z & 0x7F;
len = 3 + data_ptr[i+1] + (data_ptr[i+2] << 8);
switch (name)
{
case 0x0D:
//
j = i + 3;
//
if ( data_ptr[j] == 0x3b ) {
len = 6;
break;
}
while ( j<(i+len) ) {
sub_name = (data_ptr[j] & 0x3f);
sub_len = data_ptr[j+1] +
(data_ptr[j+2] << 8);
ptr_inc = sub_len + 3;
switch (sub_name)
{
case 0x3C:
memcpy( wwnbuf, &data_ptr[j+3], 8);
iReturn = j+3;
break;
default:
break;
}
j += ptr_inc;
}
break;
default:
break;
}
}
//
i += len;
} // end while
return iReturn;
}
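/*
 * Illustrative sketch only (not part of the original driver): how the two
 * routines above are typically combined to recover the adapter's WWN.  The
 * GPIO register pointers are placeholders for the mapped TachLite status
 * (input) and control (output) registers the caller already holds.
 */
static int cpqfcTS_example_read_wwn( void* GPIOin, void* GPIOout, UCHAR *wwnbuf )
{
  UCHAR nvram[128];  /* cpqfcTS_GetNVRAM_data() scans at most 128 bytes */

  if( !cpqfcTS_ReadNVRAM( GPIOin, GPIOout, sizeof(nvram), nvram ) )
    return 0;        /* I2C transfer failed */

  /* returns the offset of the 8-byte WWN field, or 0 if none was found */
  return cpqfcTS_GetNVRAM_data( wwnbuf, nvram );
}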
// define a short 5 micro sec delay, and longer (ms) delay
static void i2c_delay(ULONG mstime)
{
ULONG i;
// NOTE: we only expect to use these delays when reading
// our adapter's NVRAM, which happens only during adapter reset.
// Delay technique from "Linux Device Drivers", A. Rubini
// (1st Ed.) pg 137.
// printk(" delay %lx ", mstime);
if( mstime ) // ms delay?
{
// delay technique
for( i=0; i < mstime; i++)
udelay(1000); // 1ms per loop
}
else // 5 micro sec delay
udelay( 5 ); // micro secs
// printk("done\n");
}
This diff has been collapsed.
// for user apps, make sure data size types are defined
// with
#define CCPQFCTS_IOC_MAGIC 'Z'
typedef struct
{
__u8 bus;
__u8 dev_fn;
__u32 board_id;
} cpqfc_pci_info_struct;
typedef __u32 DriverVer_type;
/*
typedef union
{
struct // Peripheral Unit Device
{
__u8 Bus:6;
__u8 Mode:2; // b00
__u8 Dev;
} PeripDev;
struct // Volume Set Address
{
__u8 DevMSB:6;
__u8 Mode:2; // b01
__u8 DevLSB;
} LogDev;
struct // Logical Unit Device (SCSI-3, SCC-2 defined)
{
__u8 Targ:6;
__u8 Mode:2; // b10
__u8 Dev:5;
__u8 Bus:3;
} LogUnit;
} SCSI3Addr_struct;
typedef struct
{
SCSI3Addr_struct FCP_Nexus;
__u8 cdb[16];
} PassThru_Command_struct;
*/
/* this is nearly duplicated in idashare.h */
typedef struct {
int lc; /* Controller number */
int node; /* Node (box) number */
int ld; /* Logical Drive on this box, if required */
__u32 nexus; /* SCSI Nexus */
void *argp; /* Argument pointer */
} VENDOR_IOCTL_REQ;
typedef struct {
char cdb[16]; /* SCSI CDB for the pass-through */
ushort bus; /* Target bus on the box */
ushort pdrive; /* Physical drive on the box */
int len; /* Length of the data area of the CDB */
int sense_len; /* Length of the sense data */
char sense_data[40]; /* Sense data */
void *bufp; /* Data area for the CDB */
char rw_flag; /* Read CDB or Write CDB */
} cpqfc_passthru_t;
/*
** Defines for the IOCTLS.
*/
#define VENDOR_READ_OPCODE 0x26
#define VENDOR_WRITE_OPCODE 0x27
#define CPQFCTS_GETPCIINFO _IOR( CCPQFCTS_IOC_MAGIC, 1, cpqfc_pci_info_struct)
#define CPQFCTS_GETDRIVVER _IOR( CCPQFCTS_IOC_MAGIC, 9, DriverVer_type)
#define CPQFCTS_SCSI_PASSTHRU _IOWR( CCPQFCTS_IOC_MAGIC,11, VENDOR_IOCTL_REQ)
/* We would rather have equivalent generic, low-level driver agnostic
ioctls that do what CPQFC_IOCTL_FC_TARGET_ADDRESS and
CPQFC_IOCTL_FC_TDR 0x5388 do, but currently, we do not have them,
consequently applications would have to know they are talking to cpqfc. */
/* Used to get Fibre Channel WWN and port_id from device */
// #define CPQFC_IOCTL_FC_TARGET_ADDRESS 0x5387
#define CPQFC_IOCTL_FC_TARGET_ADDRESS \
_IOR( CCPQFCTS_IOC_MAGIC, 13, Scsi_FCTargAddress)
/* Used to invoke Target Device Reset for Fibre Channel */
// #define CPQFC_IOCTL_FC_TDR 0x5388
#define CPQFC_IOCTL_FC_TDR _IO( CCPQFCTS_IOC_MAGIC, 15)
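From user space, issuing one of these ioctls looks roughly like the sketch below; the device node name /dev/cpqfcts is an assumption, and the header name in the include is an assumed name for this ioctl header, so adjust both to match the installed driver:

/* User-space sketch; /dev/cpqfcts is an assumed device node name. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>    /* __u8/__u32, as the comment above asks user apps to provide */
#include "cpqfcTSioctl.h"   /* assumed name of this ioctl header */

int main(void)
{
  cpqfc_pci_info_struct info;
  int fd = open("/dev/cpqfcts", O_RDONLY);

  if (fd < 0)
    return 1;
  if (ioctl(fd, CPQFCTS_GETPCIINFO, &info) == 0)
    printf("bus %u devfn %u board_id 0x%08x\n",
           (unsigned) info.bus, (unsigned) info.dev_fn,
           (unsigned) info.board_id);
  close(fd);
  return 0;
}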
// Routine to trigger a Finisar GTA analyzer. Runs off GPIO2.
// NOTE: DEBUG ONLY! Could interfere with FCMNGR/Miniport operation
// since it writes directly to the Tachyon board. This function was
// developed for the Compaq HBA Tachyon TS v1.2 (Rev X5 PCB).
#include "cpqfcTStrigger.h"

#if TRIGGERABLE_HBA

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <asm/io.h>

void TriggerHBA( void* IOBaseUpper, int Print)
{
  __u32 value;

  // get initial value in hopes of not modifying any other GPIO line
  IOBaseUpper += 0x188; // TachTL/TS Control reg
  value = readl( IOBaseUpper);

  // set HIGH to trigger external analyzer (tested on Dolche Finisar 1Gb GTA)
  // The Finisar analyzer triggers on a low-to-high TTL transition
  value |= 0x01; // set bit 0
  writel( value, IOBaseUpper);

  if( Print)
    printk( " -GPIO0 set- ");
}
#endif
// don't do this unless you have the right hardware!
#define TRIGGERABLE_HBA 0
#if TRIGGERABLE_HBA
void TriggerHBA( void*, int);
#else
#define TriggerHBA(x, y)
#endif
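With TRIGGERABLE_HBA left at 0 the macro version compiles away to nothing, so debug call sites add no cost to a normal build; a hedged sketch of such a call site, where io_base_upper is a stand-in name for the remapped upper Tachyon register block:

/* Sketch only: io_base_upper is a hypothetical name. */
static inline void example_debug_mark( void *io_base_upper )
{
  // Pulses GPIO0 so an external analyzer can trigger at this point;
  // expands to an empty statement when TRIGGERABLE_HBA is 0.
  TriggerHBA( io_base_upper, 1 );
}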
......@@ -976,6 +976,16 @@ static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
}
}
/* Print a one-time request for user feedback; called from each
 * single-byte PIO transfer in the hunks below. */
static inline void pio_trigger(void)
{
	static int feedback_requested;

	if (!feedback_requested) {
		feedback_requested = 1;
		printk(KERN_WARNING "%s: Please, contact <linux-scsi@vger.kernel.org> "
		       "to help improve support for your system.\n", __FILE__);
	}
}
/* Prepare SRB for being sent to Device DCB w/ command *cmd */
static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
......@@ -2320,6 +2330,7 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
CFG2_WIDEFIFO);
while (DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) != 0x40) {
u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
pio_trigger();
*(srb->virt_addr)++ = byte;
if (debug_enabled(DBG_PIO))
printk(" %02x", byte);
......@@ -2331,6 +2342,7 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
/* Read the last byte ... */
if (srb->total_xfer_length > 0) {
u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
pio_trigger();
*(srb->virt_addr)++ = byte;
srb->total_xfer_length--;
if (debug_enabled(DBG_PIO))
......@@ -2507,6 +2519,7 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
if (debug_enabled(DBG_PIO))
printk(" %02x", (unsigned char) *(srb->virt_addr));
pio_trigger();
DC395x_write8(acb, TRM_S1040_SCSI_FIFO,
*(srb->virt_addr)++);
......
......@@ -671,7 +671,7 @@ static irqreturn_t fd_mcs_intr(int irq, void *dev_id, struct pt_regs *regs)
outb(0x40 | FIFO_COUNT, Interrupt_Cntl_port);
outb(0x82, SCSI_Cntl_port); /* Bus Enable + Select */
outb(adapter_mask | (1 << current_SC->device->id), SCSI_Data_NoACK_port);
outb(adapter_mask | (1 << scmd_id(current_SC)), SCSI_Data_NoACK_port);
/* Stop arbitration and enable parity */
outb(0x10 | PARITY_MASK, TMC_Cntl_port);
......@@ -683,7 +683,7 @@ static irqreturn_t fd_mcs_intr(int irq, void *dev_id, struct pt_regs *regs)
status = inb(SCSI_Status_port);
if (!(status & 0x01)) {
/* Try again, for slow devices */
if (fd_mcs_select(shpnt, current_SC->device->id)) {
if (fd_mcs_select(shpnt, scmd_id(current_SC))) {
#if EVERY_ACCESS
printk(" SFAIL ");
#endif
......
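Both fd_mcs hunks swap the open-coded current_SC->device->id dereference for the scmd_id() accessor; the helper in <scsi/scsi_cmnd.h> is, to a close approximation, the one-liner below, so the substitution is mechanical:

/* Approximate definition, shown for reference only. */
#define scmd_id(scmd)  ((scmd)->device->id)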
......@@ -70,7 +70,6 @@ struct lpfc_nodelist {
struct timer_list nlp_tmofunc; /* Used for nodev tmo */
struct fc_rport *rport; /* Corresponding FC transport
port structure */
struct lpfc_nodelist *nlp_rpi_hash_next;
struct lpfc_hba *nlp_phba;
struct lpfc_work_evt nodev_timeout_evt;
struct lpfc_work_evt els_retry_evt;
......