Commit c92f222e authored by James Bottomley and committed by James Bottomley

[SCSI] mptspi: Add transport class Domain Validation

This is the first half of a patch to add the generic domain validation
to mptspi.  It also creates a secondary "virtual" channel for raid
component devices since these are now exported with no_uld_attach.

What Eric and I would have really liked is to export all physical
components on channel 0 and all raid components on channel 1.
Unfortunately, this would result in device renumbering on platforms with
mixed RAID/Physical devices which was considered unacceptable for
userland stability reasons.

Still to be done is to plug back the extra parameter setting and DV
pieces on reset and hotplug.
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Parent 3ef0b47e
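For readers skimming the diff below, here is a small, self-contained C model of the RAID-component routing that the mptscsih_qcmd()/mptscsih_raid_id_to_num() hunks further down introduce. It is only an illustrative sketch, not driver code: the struct, the table contents, and the printed strings are invented for the example, while the real flags and function codes (MPT_TARGET_FLAGS_RAID_COMPONENT, MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, DID_NO_CONNECT) live in the MPI and SCSI headers.

#include <stdio.h>

/* Toy stand-in for one IOC Page 3 entry: the SCSI id under which a hidden
 * RAID component appears, and its firmware physical-disk number. */
struct phys_disk {
	int id;
	int num;
};

/* Mirrors the idea of mptscsih_raid_id_to_num(): map a SCSI id to a
 * PhysDiskNum from IOC Page 3, or return -1 if the id is not a component. */
static int raid_id_to_num(const struct phys_disk *pg3, int ndisks, int id)
{
	int i;

	for (i = 0; i < ndisks; i++)
		if (pg3[i].id == id)
			return pg3[i].num;
	return -1;
}

int main(void)
{
	/* Invented example table: hidden RAID components at ids 1 and 2. */
	const struct phys_disk pg3[] = { { 1, 0 }, { 2, 1 } };
	const int ids[] = { 1, 3 };	/* queue a command to each of these */
	int i;

	for (i = 0; i < 2; i++) {
		int num = raid_id_to_num(pg3, 2, ids[i]);

		/* Assuming the target carries the RAID-component flag, a failed
		 * lookup completes the command with DID_NO_CONNECT; otherwise
		 * the request is issued as a RAID passthrough. */
		if (num < 0)
			printf("id %d: no PhysDiskNum -> DID_NO_CONNECT\n", ids[i]);
		else
			printf("id %d: PhysDiskNum %d -> RAID_SCSI_IO_PASSTHROUGH\n",
			       ids[i], num);
	}
	return 0;
}

The real driver performs the same lookup against ioc->raid_data.pIocPg3 and switches pScsiReq->Function accordingly, as shown in the mptscsih_qcmd() hunks below.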
@@ -9,6 +9,7 @@ config FUSION_SPI
tristate "Fusion MPT ScsiHost drivers for SPI"
depends on PCI && SCSI
select FUSION
select SCSI_SPI_ATTRS
---help---
SCSI HOST support for a parallel SCSI host adapter.
@@ -1120,65 +1120,6 @@ mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
return -1;
}
int
mpt_alt_ioc_wait(MPT_ADAPTER *ioc)
{
int loop_count = 30 * 4; /* Wait 30 seconds */
int status = -1; /* -1 means failed to get board READY */
do {
spin_lock(&ioc->initializing_hba_lock);
if (ioc->initializing_hba_lock_flag == 0) {
ioc->initializing_hba_lock_flag=1;
spin_unlock(&ioc->initializing_hba_lock);
status = 0;
break;
}
spin_unlock(&ioc->initializing_hba_lock);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ/4);
} while (--loop_count);
return status;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mpt_bringup_adapter - This is a wrapper function for mpt_do_ioc_recovery
* @ioc: Pointer to MPT adapter structure
* @sleepFlag: Use schedule if CAN_SLEEP else use udelay.
*
* This routine performs all the steps necessary to bring the IOC
* to an OPERATIONAL state.
*
* Special Note: This function was added with spin locks so as to allow
* the DV (domain validation) work thread, which may be running on the
* other channel at the same time this function is called, to succeed.
* Without this lock, DV would fail when message frames were
* requested during HBA bringup on the alternate IOC.
*/
static int
mpt_bringup_adapter(MPT_ADAPTER *ioc, int sleepFlag)
{
int r;
if(ioc->alt_ioc) {
if((r=mpt_alt_ioc_wait(ioc->alt_ioc)!=0))
return r;
}
r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
CAN_SLEEP);
if(ioc->alt_ioc) {
spin_lock(&ioc->alt_ioc->initializing_hba_lock);
ioc->alt_ioc->initializing_hba_lock_flag=0;
spin_unlock(&ioc->alt_ioc->initializing_hba_lock);
}
return r;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* mpt_attach - Install a PCI intelligent MPT adapter.
@@ -1482,7 +1423,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
*/
mpt_detect_bound_ports(ioc, pdev);
if ((r = mpt_bringup_adapter(ioc, CAN_SLEEP)) != 0){
if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
CAN_SLEEP)) != 0){
printk(KERN_WARNING MYNAM
": WARNING - %s did not initialize properly! (%d)\n",
ioc->name, r);
@@ -1629,7 +1571,6 @@ mpt_resume(struct pci_dev *pdev)
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
u32 device_state = pdev->current_state;
int recovery_state;
int ii;
printk(MYIOC_s_INFO_FMT
"pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n",
@@ -1643,14 +1584,6 @@ mpt_resume(struct pci_dev *pdev)
CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
ioc->active = 1;
/* F/W not running */
if(!CHIPREG_READ32(&ioc->chip->Doorbell)) {
/* enable domain validation flags */
for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_NEED_DV;
}
}
printk(MYIOC_s_INFO_FMT
"pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
ioc->name,
@@ -6435,7 +6368,6 @@ EXPORT_SYMBOL(mpt_read_ioc_pg_3);
EXPORT_SYMBOL(mpt_alloc_fw_memory);
EXPORT_SYMBOL(mpt_free_fw_memory);
EXPORT_SYMBOL(mptbase_sas_persist_operation);
EXPORT_SYMBOL(mpt_alt_ioc_wait);
EXPORT_SYMBOL(mptbase_GetFcPortPage0);
@@ -331,6 +331,7 @@ typedef struct _SYSIF_REGS
* VirtDevice - FC LUN device or SCSI target device
*/
typedef struct _VirtTarget {
struct scsi_target *starget;
u8 tflags;
u8 ioc_id;
u8 target_id;
@@ -343,7 +344,6 @@ typedef struct _VirtTarget {
u8 type; /* byte 0 of Inquiry data */
u32 num_luns;
u32 luns[8]; /* Max LUNs is 256 */
u8 inq_data[8];
} VirtTarget;
typedef struct _VirtDevice {
@@ -364,6 +364,7 @@ typedef struct _VirtDevice {
#define MPT_TARGET_FLAGS_Q_YES 0x08
#define MPT_TARGET_FLAGS_VALID_56 0x10
#define MPT_TARGET_FLAGS_SAF_TE_ISSUED 0x20
#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x40
/*
* /proc/mpt interface
@@ -447,13 +448,6 @@ typedef struct _mpt_ioctl_events {
* Substructure to store SCSI specific configuration page data
*/
/* dvStatus defines: */
#define MPT_SCSICFG_NEGOTIATE 0x01 /* Negotiate on next IO */
#define MPT_SCSICFG_NEED_DV 0x02 /* Schedule DV */
#define MPT_SCSICFG_DV_PENDING 0x04 /* DV on this physical id pending */
#define MPT_SCSICFG_DV_NOT_DONE 0x08 /* DV has not been performed */
#define MPT_SCSICFG_BLK_NEGO 0x10 /* WriteSDP1 with WDTR and SDTR disabled */
#define MPT_SCSICFG_RELOAD_IOC_PG3 0x20 /* IOC Pg 3 data is obsolete */
/* Args passed to writeSDP1: */
#define MPT_SCSICFG_USE_NVRAM 0x01 /* WriteSDP1 using NVRAM */
#define MPT_SCSICFG_ALL_IDS 0x02 /* WriteSDP1 to all IDS */
/* #define MPT_SCSICFG_BLK_NEGO 0x10 WriteSDP1 with WDTR and SDTR disabled */
@@ -464,7 +458,6 @@ typedef struct _SpiCfgData {
IOCPage4_t *pIocPg4; /* SEP devices addressing */
dma_addr_t IocPg4_dma; /* Phys Addr of IOCPage4 data */
int IocPg4Sz; /* IOCPage4 size */
u8 dvStatus[MPT_MAX_SCSI_DEVICES];
u8 minSyncFactor; /* 0xFF if async */
u8 maxSyncOffset; /* 0 if async */
u8 maxBusWidth; /* 0 if narrow, 1 if wide */
@@ -474,13 +467,11 @@ typedef struct _SpiCfgData {
u8 sdp0version; /* SDP0 version */
u8 sdp0length; /* SDP0 length */
u8 dvScheduled; /* 1 if scheduled */
u8 forceDv; /* 1 to force DV scheduling */
u8 noQas; /* Disable QAS for this adapter */
u8 Saf_Te; /* 1 to force all Processors as
* SAF-TE if Inquiry data length
* is too short to check for SAF-TE
*/
u8 mpt_dv; /* command line option: enhanced=1, basic=0 */
u8 bus_reset; /* 1 to allow bus reset */
u8 rsvd[1];
}SpiCfgData;
@@ -1033,7 +1024,6 @@ extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
extern int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
extern int mptbase_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum);
extern int mpt_alt_ioc_wait(MPT_ADAPTER *ioc);
/*
* Public data decl's...
@@ -114,21 +114,6 @@ typedef struct _internal_cmd {
u8 rsvd;
} INTERNAL_CMD;
typedef struct _negoparms {
u8 width;
u8 offset;
u8 factor;
u8 flags;
} NEGOPARMS;
typedef struct _dv_parameters {
NEGOPARMS max;
NEGOPARMS now;
u8 cmd;
u8 id;
u16 pad1;
} DVPARAMETERS;
/*
* Other private/forward protos...
*/
@@ -149,28 +134,12 @@ static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 tar
int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static void mptscsih_initTarget(MPT_SCSI_HOST *hd, VirtTarget *vtarget, u8 lun, char *data, int dlen);
static void mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *vtarget, char byte56);
static void mptscsih_setDevicePage1Flags (u8 width, u8 factor, u8 offset, int *requestedPtr, int *configurationPtr, u8 flags);
static void mptscsih_no_negotiate(MPT_SCSI_HOST *hd, struct scsi_cmnd *sc);
static int mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target, int flags);
static void mptscsih_initTarget(MPT_SCSI_HOST *hd, VirtTarget *vtarget, struct scsi_device *sdev);
static void mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *vtarget, struct scsi_device *sdev);
static int mptscsih_writeIOCPage4(MPT_SCSI_HOST *hd, int target_id, int bus);
int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
static void mptscsih_negotiate_to_asyn_narrow(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
static int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id);
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
static int mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io);
static void mptscsih_domainValidation(void *hd);
static void mptscsih_qas_check(MPT_SCSI_HOST *hd, int id);
static int mptscsih_doDv(MPT_SCSI_HOST *hd, int channel, int target);
static void mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage);
static void mptscsih_fillbuf(char *buffer, int size, int index, int width);
static void mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id);
static void mptscsih_set_dvflags(MPT_SCSI_HOST *hd, struct scsi_cmnd *sc);
#endif
void mptscsih_remove(struct pci_dev *);
void mptscsih_shutdown(struct pci_dev *);
@@ -181,16 +150,6 @@ int mptscsih_resume(struct pci_dev *pdev);
#define SNS_LEN(scp) sizeof((scp)->sense_buffer)
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
/*
* Domain Validation task structure
*/
static DEFINE_SPINLOCK(dvtaskQ_lock);
static int dvtaskQ_active = 0;
static int dvtaskQ_release = 0;
static struct work_struct dvTaskQ_task;
#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_add_sge - Place a simple SGE at address pAddr.
@@ -687,9 +646,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
*/
sc->result = DID_RESET << 16;
/* GEM Workaround. */
if (ioc->bus_type == SPI)
mptscsih_no_negotiate(hd, sc);
break;
case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
@@ -1005,10 +961,6 @@ mptscsih_remove(struct pci_dev *pdev)
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
struct Scsi_Host *host = ioc->sh;
MPT_SCSI_HOST *hd;
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
int count;
unsigned long flags;
#endif
int sz1;
if(!host) {
@@ -1021,25 +973,6 @@ mptscsih_remove(struct pci_dev *pdev)
if((hd = (MPT_SCSI_HOST *)host->hostdata) == NULL)
return;
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
/* Check DV thread active */
count = 10 * HZ;
spin_lock_irqsave(&dvtaskQ_lock, flags);
if (dvtaskQ_active) {
spin_unlock_irqrestore(&dvtaskQ_lock, flags);
while(dvtaskQ_active && --count)
schedule_timeout_interruptible(1);
} else {
spin_unlock_irqrestore(&dvtaskQ_lock, flags);
}
if (!count)
printk(KERN_ERR MYNAM ": ERROR - DV thread still active!\n");
#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY)
else
printk(KERN_ERR MYNAM ": DV thread orig %d, count %d\n", 10 * HZ, count);
#endif
#endif
mptscsih_shutdown(pdev);
sz1=0;
@@ -1127,21 +1060,6 @@ mptscsih_resume(struct pci_dev *pdev)
if(!hd)
return 0;
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
{
unsigned long lflags;
spin_lock_irqsave(&dvtaskQ_lock, lflags);
if (!dvtaskQ_active) {
dvtaskQ_active = 1;
spin_unlock_irqrestore(&dvtaskQ_lock, lflags);
INIT_WORK(&dvTaskQ_task,
mptscsih_domainValidation, (void *) hd);
schedule_work(&dvTaskQ_task);
} else {
spin_unlock_irqrestore(&dvtaskQ_lock, lflags);
}
}
#endif
return 0;
}
@@ -1317,6 +1235,13 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
return SCSI_MLQUEUE_HOST_BUSY;
}
if (vdev->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT &&
mptscsih_raid_id_to_num(hd, SCpnt->device->id) < 0) {
SCpnt->result = DID_NO_CONNECT << 16;
done(SCpnt);
return 0;
}
/*
* Put together a MPT SCSI request...
*/
@@ -1363,6 +1288,9 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
pScsiReq->TargetID = (u8) vdev->target_id;
pScsiReq->Bus = vdev->bus_id;
pScsiReq->ChainOffset = 0;
if (vdev->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
pScsiReq->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
else
pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
pScsiReq->CDBLength = SCpnt->cmd_len;
pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
@@ -1411,49 +1339,6 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
hd->ScsiLookup[my_idx] = SCpnt;
SCpnt->host_scribble = NULL;
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
if (hd->ioc->bus_type == SPI) {
int dvStatus = hd->ioc->spi_data.dvStatus[vdev->target_id];
int issueCmd = 1;
if (dvStatus || hd->ioc->spi_data.forceDv) {
if ((dvStatus & MPT_SCSICFG_NEED_DV) ||
(hd->ioc->spi_data.forceDv & MPT_SCSICFG_NEED_DV)) {
unsigned long lflags;
/* Schedule DV if necessary */
spin_lock_irqsave(&dvtaskQ_lock, lflags);
if (!dvtaskQ_active) {
dvtaskQ_active = 1;
spin_unlock_irqrestore(&dvtaskQ_lock, lflags);
INIT_WORK(&dvTaskQ_task, mptscsih_domainValidation, (void *) hd);
schedule_work(&dvTaskQ_task);
} else {
spin_unlock_irqrestore(&dvtaskQ_lock, lflags);
}
hd->ioc->spi_data.forceDv &= ~MPT_SCSICFG_NEED_DV;
}
/* Trying to do DV to this target, extend timeout.
* Wait to issue until flag is clear
*/
if (dvStatus & MPT_SCSICFG_DV_PENDING) {
mod_timer(&SCpnt->eh_timeout, jiffies + 40 * HZ);
issueCmd = 0;
}
/* Set the DV flags.
*/
if (dvStatus & MPT_SCSICFG_DV_NOT_DONE)
mptscsih_set_dvflags(hd, SCpnt);
if (!issueCmd)
goto fail;
}
}
#endif
mpt_put_msg_frame(hd->ioc->DoneCtx, hd->ioc, mf);
dmfprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n",
hd->ioc->name, SCpnt, mf, my_idx));
@@ -2218,6 +2103,24 @@ mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev,
return 0;
}
int
mptscsih_raid_id_to_num(MPT_SCSI_HOST *hd, uint physdiskid)
{
int i;
if (!hd->ioc->raid_data.isRaid || !hd->ioc->raid_data.pIocPg3)
return -ENXIO;
for (i = 0; i < hd->ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
if (physdiskid ==
hd->ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID)
return hd->ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum;
}
return -ENXIO;
}
EXPORT_SYMBOL(mptscsih_raid_id_to_num);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* OS entry point to allow host driver to alloc memory
@@ -2233,6 +2136,7 @@ mptscsih_target_alloc(struct scsi_target *starget)
if (!vtarget)
return -ENOMEM;
starget->hostdata = vtarget;
vtarget->starget = starget;
return 0;
}
@@ -2266,6 +2170,7 @@ mptscsih_slave_alloc(struct scsi_device *sdev)
starget = scsi_target(sdev);
vtarget = starget->hostdata;
vdev->vtarget = vtarget;
if (vtarget->num_luns == 0) {
@@ -2274,15 +2179,12 @@ mptscsih_slave_alloc(struct scsi_device *sdev)
vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
vtarget->target_id = sdev->id;
vtarget->bus_id = sdev->channel;
if (hd->ioc->bus_type == SPI) {
if (hd->ioc->raid_data.isRaid & (1 << sdev->id)) {
if (hd->ioc->bus_type == SPI && sdev->channel == 0 &&
hd->ioc->raid_data.isRaid & (1 << sdev->id)) {
vtarget->raidVolume = 1;
ddvtprintk((KERN_INFO
"RAID Volume @ id %d\n", sdev->id));
}
} else {
vtarget->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
}
}
vtarget->num_luns++;
return 0;
@@ -2321,19 +2223,6 @@ mptscsih_slave_destroy(struct scsi_device *sdev)
vtarget->luns[0] &= ~(1 << vdevice->lun);
vtarget->num_luns--;
if (vtarget->num_luns == 0) {
mptscsih_negotiate_to_asyn_narrow(hd, vdevice);
if (hd->ioc->bus_type == SPI) {
if (mptscsih_is_phys_disk(hd->ioc, vtarget->target_id)) {
hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3;
} else {
hd->ioc->spi_data.dvStatus[vtarget->target_id] =
MPT_SCSICFG_NEGOTIATE;
if (!hd->negoNvram) {
hd->ioc->spi_data.dvStatus[vtarget->target_id] |=
MPT_SCSICFG_DV_NOT_DONE;
}
}
}
hd->Targets[sdev->id] = NULL;
}
mptscsih_synchronize_cache(hd, vdevice);
@@ -2362,18 +2251,13 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
vtarget = starget->hostdata;
if (hd->ioc->bus_type == SPI) {
if (vtarget->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY) {
if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
max_depth = 1;
else if (((vtarget->inq_data[0] & 0x1f) == 0x00) &&
(vtarget->minSyncFactor <= MPT_ULTRA160 ))
else if (sdev->type == TYPE_DISK &&
vtarget->minSyncFactor <= MPT_ULTRA160)
max_depth = MPT_SCSI_CMD_PER_DEV_HIGH;
else
max_depth = MPT_SCSI_CMD_PER_DEV_LOW;
} else {
/* error case - No Inq. Data */
max_depth = 1;
}
} else
max_depth = MPT_SCSI_CMD_PER_DEV_HIGH;
@@ -2427,8 +2311,7 @@ mptscsih_slave_configure(struct scsi_device *sdev)
lun_index = (vdevice->lun >> 5); /* 32 luns per lun_index */
indexed_lun = (vdevice->lun % 32);
vtarget->luns[lun_index] |= (1 << indexed_lun);
mptscsih_initTarget(hd, vtarget, sdev->lun, sdev->inquiry,
sdev->inquiry_len );
mptscsih_initTarget(hd, vtarget, sdev);
mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
dsprintk((MYIOC_s_INFO_FMT
@@ -2597,10 +2480,6 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
/* 4. Renegotiate to all devices, if SPI
*/
if (ioc->bus_type == SPI) {
dnegoprintk(("writeSDP1: ALL_IDS USE_NVRAM\n"));
mptscsih_writeSDP1(hd, 0, 0, MPT_SCSICFG_ALL_IDS | MPT_SCSICFG_USE_NVRAM);
}
/* 5. Enable new commands to be posted
*/
@@ -2624,13 +2503,6 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
hd->cmdPtr = NULL;
}
/* 7. SPI: Set flag to force DV and re-read IOC Page 3
*/
if (ioc->bus_type == SPI) {
ioc->spi_data.forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
ddvtprintk(("Set reload IOC Pg3 Flag\n"));
}
/* 7. FC: Rescan for blocked rports which might have returned.
*/
else if (ioc->bus_type == FC) {
@@ -2699,18 +2571,7 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
break;
case MPI_EVENT_INTEGRATED_RAID: /* 0B */
{
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
pMpiEventDataRaid_t pRaidEventData =
(pMpiEventDataRaid_t) pEvReply->Data;
/* Domain Validation Needed */
if (ioc->bus_type == SPI &&
pRaidEventData->ReasonCode ==
MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED)
mptscsih_set_dvflags_raid(hd, pRaidEventData->PhysDiskNum);
#endif
break;
}
case MPI_EVENT_NONE: /* 00 */
case MPI_EVENT_LOG_DATA: /* 01 */
@@ -2729,9 +2590,7 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
* mptscsih_initTarget - Target, LUN alloc/free functionality.
* @hd: Pointer to MPT_SCSI_HOST structure
* @vtarget: per target private data
* @lun: SCSI LUN id
* @data: Pointer to data
* @dlen: Number of INQUIRY bytes
* @sdev: SCSI device
*
* NOTE: It's only SAFE to call this routine if data points to
* sane & valid STANDARD INQUIRY data!
@@ -2741,98 +2600,46 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
*
*/
static void
mptscsih_initTarget(MPT_SCSI_HOST *hd, VirtTarget *vtarget, u8 lun, char *data, int dlen)
mptscsih_initTarget(MPT_SCSI_HOST *hd, VirtTarget *vtarget,
struct scsi_device *sdev)
{
SpiCfgData *pSpi;
char data_56;
int inq_len;
dinitprintk((MYIOC_s_INFO_FMT "initTarget bus=%d id=%d lun=%d hd=%p\n",
hd->ioc->name, vtarget->bus_id, vtarget->target_id, lun, hd));
/*
* If the peripheral qualifier filter is enabled then if the target reports a 0x1
* (i.e. The target is capable of supporting the specified peripheral device type
* on this logical unit; however, the physical device is not currently connected
* to this logical unit) it will be converted to a 0x3 (i.e. The target is not
* capable of supporting a physical device on this logical unit). This is to work
* around a bug in the mid-layer in some distributions in which the mid-layer will
* continue to try to communicate with the LUN and eventually create a dummy LUN.
*/
if (hd->mpt_pq_filter && dlen && (data[0] & 0xE0))
data[0] |= 0x40;
/* Is LUN supported? If so, upper 2 bits will be 0
* in first byte of inquiry data.
*/
if (data[0] & 0xe0)
if (sdev->inq_periph_qual != 0)
return;
if (vtarget == NULL)
return;
if (data)
vtarget->type = data[0];
vtarget->type = sdev->type;
if (hd->ioc->bus_type != SPI)
return;
if ((data[0] == TYPE_PROCESSOR) && (hd->ioc->spi_data.Saf_Te)) {
if ((sdev->type == TYPE_PROCESSOR) && (hd->ioc->spi_data.Saf_Te)) {
/* Treat all Processors as SAF-TE if
* command line option is set */
vtarget->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
mptscsih_writeIOCPage4(hd, vtarget->target_id, vtarget->bus_id);
}else if ((data[0] == TYPE_PROCESSOR) &&
}else if ((sdev->type == TYPE_PROCESSOR) &&
!(vtarget->tflags & MPT_TARGET_FLAGS_SAF_TE_ISSUED )) {
if ( dlen > 49 ) {
vtarget->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
if ( data[44] == 'S' &&
data[45] == 'A' &&
data[46] == 'F' &&
data[47] == '-' &&
data[48] == 'T' &&
data[49] == 'E' ) {
if (sdev->inquiry_len > 49 ) {
if (sdev->inquiry[44] == 'S' &&
sdev->inquiry[45] == 'A' &&
sdev->inquiry[46] == 'F' &&
sdev->inquiry[47] == '-' &&
sdev->inquiry[48] == 'T' &&
sdev->inquiry[49] == 'E' ) {
vtarget->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
mptscsih_writeIOCPage4(hd, vtarget->target_id, vtarget->bus_id);
}
}
}
if (!(vtarget->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY)) {
inq_len = dlen < 8 ? dlen : 8;
memcpy (vtarget->inq_data, data, inq_len);
/* If have not done DV, set the DV flag.
*/
pSpi = &hd->ioc->spi_data;
if ((data[0] == TYPE_TAPE) || (data[0] == TYPE_PROCESSOR)) {
if (pSpi->dvStatus[vtarget->target_id] & MPT_SCSICFG_DV_NOT_DONE)
pSpi->dvStatus[vtarget->target_id] |= MPT_SCSICFG_NEED_DV;
}
vtarget->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
data_56 = 0x0F; /* Default to full capabilities if Inq data length is < 57 */
if (dlen > 56) {
if ( (!(vtarget->tflags & MPT_TARGET_FLAGS_VALID_56))) {
/* Update the target capabilities
*/
data_56 = data[56];
vtarget->tflags |= MPT_TARGET_FLAGS_VALID_56;
}
}
mptscsih_setTargetNegoParms(hd, vtarget, data_56);
} else {
/* Initial Inquiry may not request enough data bytes to
* obtain byte 57. DV will; if target doesn't return
* at least 57 bytes, data[56] will be zero. */
if (dlen > 56) {
if ( (!(vtarget->tflags & MPT_TARGET_FLAGS_VALID_56))) {
/* Update the target capabilities
*/
data_56 = data[56];
vtarget->tflags |= MPT_TARGET_FLAGS_VALID_56;
mptscsih_setTargetNegoParms(hd, vtarget, data_56);
}
}
}
mptscsih_setTargetNegoParms(hd, vtarget, sdev);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2842,66 +2649,51 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, VirtTarget *vtarget, u8 lun, char *data,
*
*/
static void
mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target, char byte56)
mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target,
struct scsi_device *sdev)
{
SpiCfgData *pspi_data = &hd->ioc->spi_data;
int id = (int) target->target_id;
int nvram;
VirtTarget *vtarget;
int ii;
u8 width = MPT_NARROW;
u8 factor = MPT_ASYNC;
u8 offset = 0;
u8 version, nfactor;
u8 nfactor;
u8 noQas = 1;
target->negoFlags = pspi_data->noQas;
/* noQas == 0 => device supports QAS. Need byte 56 of Inq to determine
* support. If available, default QAS to off and allow enabling.
* If not available, default QAS to on, turn off for non-disks.
*/
/* noQas == 0 => device supports QAS. */
/* Set flags based on Inquiry data
*/
version = target->inq_data[2] & 0x07;
if (version < 2) {
if (sdev->scsi_level < SCSI_2) {
width = 0;
factor = MPT_ULTRA2;
offset = pspi_data->maxSyncOffset;
target->tflags &= ~MPT_TARGET_FLAGS_Q_YES;
} else {
if (target->inq_data[7] & 0x20) {
if (scsi_device_wide(sdev)) {
width = 1;
}
if (target->inq_data[7] & 0x10) {
if (scsi_device_sync(sdev)) {
factor = pspi_data->minSyncFactor;
if (target->tflags & MPT_TARGET_FLAGS_VALID_56) {
/* bits 2 & 3 show Clocking support */
if ((byte56 & 0x0C) == 0)
if (!scsi_device_dt(sdev))
factor = MPT_ULTRA2;
else {
if ((byte56 & 0x03) == 0)
if (!scsi_device_ius(sdev) &&
!scsi_device_qas(sdev))
factor = MPT_ULTRA160;
else {
factor = MPT_ULTRA320;
if (byte56 & 0x02)
{
if (scsi_device_qas(sdev)) {
ddvtprintk((KERN_INFO "Enabling QAS due to byte56=%02x on id=%d!\n", byte56, id));
noQas = 0;
}
if (target->inq_data[0] == TYPE_TAPE) {
if (byte56 & 0x01)
if (sdev->type == TYPE_TAPE &&
scsi_device_ius(sdev))
target->negoFlags |= MPT_TAPE_NEGO_IDP;
}
}
}
} else {
ddvtprintk((KERN_INFO "Enabling QAS on id=%d due to ~TARGET_FLAGS_VALID_56!\n", id));
noQas = 0;
}
offset = pspi_data->maxSyncOffset;
/* If RAID, never disable QAS
@@ -2919,7 +2711,7 @@ mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target, char byte56)
}
}
if ( (target->inq_data[7] & 0x02) == 0) {
if (!sdev->tagged_supported) {
target->tflags &= ~MPT_TARGET_FLAGS_Q_YES;
}
@@ -2977,12 +2769,6 @@ mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target, char byte56)
if ( factor > MPT_ULTRA320 )
noQas = 0;
/* GEM, processor WORKAROUND
*/
if ((target->inq_data[0] == TYPE_PROCESSOR) || (target->inq_data[0] > 0x08)) {
target->negoFlags |= (MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC);
pspi_data->dvStatus[id] |= MPT_SCSICFG_BLK_NEGO;
} else {
if (noQas && (pspi_data->noQas == 0)) {
pspi_data->noQas |= MPT_TARGET_NO_NEGO_QAS;
target->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
@@ -2991,41 +2777,10 @@ mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target, char byte56)
*/
ddvtprintk((KERN_INFO "Disabling QAS due to noQas=%02x on id=%d!\n", noQas, id));
for (ii = 0; ii < id; ii++) {
if ( (vtarget = hd->Targets[ii]) ) {
vtarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
mptscsih_writeSDP1(hd, 0, ii, vtarget->negoFlags);
}
}
}
}
/* Write SDP1 on this I/O to this target */
if (pspi_data->dvStatus[id] & MPT_SCSICFG_NEGOTIATE) {
ddvtprintk((KERN_INFO "MPT_SCSICFG_NEGOTIATE on id=%d!\n", id));
mptscsih_writeSDP1(hd, 0, id, hd->negoNvram);
pspi_data->dvStatus[id] &= ~MPT_SCSICFG_NEGOTIATE;
} else if (pspi_data->dvStatus[id] & MPT_SCSICFG_BLK_NEGO) {
ddvtprintk((KERN_INFO "MPT_SCSICFG_BLK_NEGO on id=%d!\n", id));
mptscsih_writeSDP1(hd, 0, id, MPT_SCSICFG_BLK_NEGO);
pspi_data->dvStatus[id] &= ~MPT_SCSICFG_BLK_NEGO;
}
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* If no Target, bus reset on 1st I/O. Set the flag to
* prevent any future negotiations to this device.
*/
static void
mptscsih_no_negotiate(MPT_SCSI_HOST *hd, struct scsi_cmnd *sc)
{
VirtDevice *vdev;
if ((vdev = sc->device->hostdata) != NULL)
hd->ioc->spi_data.dvStatus[vdev->target_id] |= MPT_SCSICFG_BLK_NEGO;
return;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
@@ -3076,207 +2831,6 @@ mptscsih_setDevicePage1Flags (u8 width, u8 factor, u8 offset, int *requestedPtr,
return;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptscsih_writeSDP1 - write SCSI Device Page 1
* @hd: Pointer to a SCSI Host Structure
* @portnum: IOC port number
* @target_id: writeSDP1 for single ID
* @flags: MPT_SCSICFG_ALL_IDS, MPT_SCSICFG_USE_NVRAM, MPT_SCSICFG_BLK_NEGO
*
* Return: -EFAULT if read of config page header fails
* or 0 if success.
*
* Remark: If a target has been found, the settings from the
* target structure are used, else the device is set
* to async/narrow.
*
* Remark: Called during init and after a FW reload.
* Remark: We do not wait for a return, write pages sequentially.
*/
static int
mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
{
MPT_ADAPTER *ioc = hd->ioc;
Config_t *pReq;
SCSIDevicePage1_t *pData;
VirtTarget *vtarget=NULL;
MPT_FRAME_HDR *mf;
dma_addr_t dataDma;
u16 req_idx;
u32 frameOffset;
u32 requested, configuration, flagsLength;
int ii, nvram;
int id = 0, maxid = 0;
u8 width;
u8 factor;
u8 offset;
u8 bus = 0;
u8 negoFlags;
u8 maxwidth, maxoffset, maxfactor;
if (ioc->spi_data.sdp1length == 0)
return 0;
if (flags & MPT_SCSICFG_ALL_IDS) {
id = 0;
maxid = ioc->sh->max_id - 1;
} else if (ioc->sh) {
id = target_id;
maxid = min_t(int, id, ioc->sh->max_id - 1);
}
for (; id <= maxid; id++) {
if (id == ioc->pfacts[portnum].PortSCSIID)
continue;
/* Use NVRAM to get adapter and target maximums
* Data overridden by target structure information, if present
*/
maxwidth = ioc->spi_data.maxBusWidth;
maxoffset = ioc->spi_data.maxSyncOffset;
maxfactor = ioc->spi_data.minSyncFactor;
if (ioc->spi_data.nvram && (ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
nvram = ioc->spi_data.nvram[id];
if (maxwidth)
maxwidth = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1;
if (maxoffset > 0) {
maxfactor = (nvram & MPT_NVRAM_SYNC_MASK) >> 8;
if (maxfactor == 0) {
/* Key for async */
maxfactor = MPT_ASYNC;
maxoffset = 0;
} else if (maxfactor < ioc->spi_data.minSyncFactor) {
maxfactor = ioc->spi_data.minSyncFactor;
}
} else
maxfactor = MPT_ASYNC;
}
/* Set the negotiation flags.
*/
negoFlags = ioc->spi_data.noQas;
if (!maxwidth)
negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
if (!maxoffset)
negoFlags |= MPT_TARGET_NO_NEGO_SYNC;
if (flags & MPT_SCSICFG_USE_NVRAM) {
width = maxwidth;
factor = maxfactor;
offset = maxoffset;
} else {
width = 0;
factor = MPT_ASYNC;
offset = 0;
//negoFlags = 0;
//negoFlags = MPT_TARGET_NO_NEGO_SYNC;
}
/* If id is not a raid volume, get the updated
* transmission settings from the target structure.
*/
if (hd->Targets && (vtarget = hd->Targets[id]) && !vtarget->raidVolume) {
width = vtarget->maxWidth;
factor = vtarget->minSyncFactor;
offset = vtarget->maxOffset;
negoFlags = vtarget->negoFlags;
}
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
/* Force to async and narrow if DV has not been executed
* for this ID
*/
if ((hd->ioc->spi_data.dvStatus[id] & MPT_SCSICFG_DV_NOT_DONE) != 0) {
width = 0;
factor = MPT_ASYNC;
offset = 0;
}
#endif
if (flags & MPT_SCSICFG_BLK_NEGO)
negoFlags |= MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC;
mptscsih_setDevicePage1Flags(width, factor, offset,
&requested, &configuration, negoFlags);
dnegoprintk(("writeSDP1: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
target_id, width, factor, offset, negoFlags, requested, configuration));
/* Get a MF for this command.
*/
if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) {
dfailprintk((MYIOC_s_WARN_FMT "write SDP1: no msg frames!\n",
ioc->name));
return -EAGAIN;
}
ddvprintk((MYIOC_s_INFO_FMT "WriteSDP1 (mf=%p, id=%d, req=0x%x, cfg=0x%x)\n",
hd->ioc->name, mf, id, requested, configuration));
/* Set the request and the data pointers.
* Request takes: 36 bytes (32 bit SGE)
* SCSI Device Page 1 requires 16 bytes
* 40 + 16 <= size of SCSI IO Request = 56 bytes
* and MF size >= 64 bytes.
* Place data at end of MF.
*/
pReq = (Config_t *)mf;
req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
frameOffset = ioc->req_sz - sizeof(SCSIDevicePage1_t);
pData = (SCSIDevicePage1_t *)((u8 *) mf + frameOffset);
dataDma = ioc->req_frames_dma + (req_idx * ioc->req_sz) + frameOffset;
/* Complete the request frame (same for all requests).
*/
pReq->Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
pReq->Reserved = 0;
pReq->ChainOffset = 0;
pReq->Function = MPI_FUNCTION_CONFIG;
pReq->ExtPageLength = 0;
pReq->ExtPageType = 0;
pReq->MsgFlags = 0;
for (ii=0; ii < 8; ii++) {
pReq->Reserved2[ii] = 0;
}
pReq->Header.PageVersion = ioc->spi_data.sdp1version;
pReq->Header.PageLength = ioc->spi_data.sdp1length;
pReq->Header.PageNumber = 1;
pReq->Header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
pReq->PageAddress = cpu_to_le32(id | (bus << 8 ));
/* Add a SGE to the config request.
*/
flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE | ioc->spi_data.sdp1length * 4;
mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
/* Set up the common data portion
*/
pData->Header.PageVersion = pReq->Header.PageVersion;
pData->Header.PageLength = pReq->Header.PageLength;
pData->Header.PageNumber = pReq->Header.PageNumber;
pData->Header.PageType = pReq->Header.PageType;
pData->RequestedParameters = cpu_to_le32(requested);
pData->Reserved = 0;
pData->Configuration = cpu_to_le32(configuration);
dprintk((MYIOC_s_INFO_FMT
"write SDP1: id %d pgaddr 0x%x req 0x%x config 0x%x\n",
ioc->name, id, (id | (bus<<8)),
requested, configuration));
mpt_put_msg_frame(ioc->DoneCtx, ioc, mf);
}
return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptscsih_writeIOCPage4 - write IOC Page 4
* @hd: Pointer to a SCSI Host Structure
@@ -3465,6 +3019,7 @@ mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
completionCode = MPT_SCANDV_GOOD;
else
completionCode = MPT_SCANDV_SOME_ERROR;
memcpy(hd->pLocal->sense, pr, sizeof(hd->pLocal->sense));
} else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
u8 *sense_data;
@@ -3578,78 +3133,6 @@ mptscsih_timer_expired(unsigned long data)
return;
}
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptscsih_do_raid - Format and Issue a RAID volume request message.
* @hd: Pointer to scsi host structure
* @action: What is to be done.
* @id: Logical target id.
* @bus: Target location's bus.
*
* Returns: < 0 on a fatal error
* 0 on success
*
* Remark: Wait to return until reply processed by the ISR.
*/
static int
mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io)
{
MpiRaidActionRequest_t *pReq;
MPT_FRAME_HDR *mf;
int in_isr;
in_isr = in_interrupt();
if (in_isr) {
dprintk((MYIOC_s_WARN_FMT "Internal raid request not allowed in ISR context!\n",
hd->ioc->name));
return -EPERM;
}
/* Get and Populate a free Frame
*/
if ((mf = mpt_get_msg_frame(hd->ioc->InternalCtx, hd->ioc)) == NULL) {
ddvprintk((MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n",
hd->ioc->name));
return -EAGAIN;
}
pReq = (MpiRaidActionRequest_t *)mf;
pReq->Action = action;
pReq->Reserved1 = 0;
pReq->ChainOffset = 0;
pReq->Function = MPI_FUNCTION_RAID_ACTION;
pReq->VolumeID = io->id;
pReq->VolumeBus = io->bus;
pReq->PhysDiskNum = io->physDiskNum;
pReq->MsgFlags = 0;
pReq->Reserved2 = 0;
pReq->ActionDataWord = 0; /* Reserved for this action */
//pReq->ActionDataSGE = 0;
mpt_add_sge((char *)&pReq->ActionDataSGE,
MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
ddvprintk((MYIOC_s_INFO_FMT "RAID Volume action %x id %d\n",
hd->ioc->name, action, io->id));
hd->pLocal = NULL;
hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
hd->scandv_wait_done = 0;
/* Save cmd pointer, for resource free if timeout or
* FW reload occurs
*/
hd->cmdPtr = mf;
add_timer(&hd->timer);
mpt_put_msg_frame(hd->ioc->InternalCtx, hd->ioc, mf);
wait_event(hd->scandv_waitq, hd->scandv_wait_done);
if ((hd->pLocal == NULL) || (hd->pLocal->completion != MPT_SCANDV_GOOD))
return -1;
return 0;
}
#endif /* ~MPTSCSIH_ENABLE_DOMAIN_VALIDATION */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
@@ -3903,106 +3386,19 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_negotiate_to_asyn_narrow - Restore devices to default state
* mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks.
* @hd: Pointer to a SCSI HOST structure
* @vtarget: per device private data
* @lun: lun
*
* Uses the ISR, but with special processing.
* MUST be single-threaded.
*
*/
static void
mptscsih_negotiate_to_asyn_narrow(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
{
VirtTarget *vtarget = vdevice->vtarget;
MPT_ADAPTER *ioc= hd->ioc;
SCSIDevicePage1_t *pcfg1Data;
CONFIGPARMS cfg;
dma_addr_t cfg1_dma_addr;
ConfigPageHeader_t header;
int id;
int requested, configuration, data,i;
u8 flags, factor;
if ((ioc->bus_type != SPI) ||
(!vdevice->configured_lun))
return;
if (!ioc->spi_data.sdp1length)
return;
pcfg1Data = (SCSIDevicePage1_t *)pci_alloc_consistent(ioc->pcidev,
ioc->spi_data.sdp1length * 4, &cfg1_dma_addr);
if (pcfg1Data == NULL)
return;
header.PageVersion = ioc->spi_data.sdp1version;
header.PageLength = ioc->spi_data.sdp1length;
header.PageNumber = 1;
header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
cfg.cfghdr.hdr = &header;
cfg.physAddr = cfg1_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1;
cfg.timeout = 0;
if (vtarget->raidVolume && ioc->raid_data.pIocPg3) {
for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
id = ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID;
flags = hd->ioc->spi_data.noQas;
if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
data = hd->ioc->spi_data.nvram[id];
if (data & MPT_NVRAM_WIDE_DISABLE)
flags |= MPT_TARGET_NO_NEGO_WIDE;
factor = (data & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT;
if ((factor == 0) || (factor == MPT_ASYNC))
flags |= MPT_TARGET_NO_NEGO_SYNC;
}
mptscsih_setDevicePage1Flags(0, MPT_ASYNC, 0, &requested,
&configuration, flags);
dnegoprintk(("nego asyn narrow: id=%d width=0 factor=MPT_ASYNC "
"offset=0 negoFlags=%x request=%x config=%x\n",
id, flags, requested, configuration));
pcfg1Data->RequestedParameters = cpu_to_le32(requested);
pcfg1Data->Reserved = 0;
pcfg1Data->Configuration = cpu_to_le32(configuration);
cfg.pageAddr = (vtarget->bus_id<<8) | id;
mpt_config(hd->ioc, &cfg);
}
} else {
flags = vtarget->negoFlags;
mptscsih_setDevicePage1Flags(0, MPT_ASYNC, 0, &requested,
&configuration, flags);
dnegoprintk(("nego asyn narrow: id=%d width=0 factor=MPT_ASYNC "
"offset=0 negoFlags=%x request=%x config=%x\n",
vtarget->target_id, flags, requested, configuration));
pcfg1Data->RequestedParameters = cpu_to_le32(requested);
pcfg1Data->Reserved = 0;
pcfg1Data->Configuration = cpu_to_le32(configuration);
cfg.pageAddr = (vtarget->bus_id<<8) | vtarget->target_id;
mpt_config(hd->ioc, &cfg);
}
if (pcfg1Data)
pci_free_consistent(ioc->pcidev, header.PageLength * 4, pcfg1Data, cfg1_dma_addr);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks.
* @hd: Pointer to a SCSI HOST structure
* @vtarget: per device private data
* @lun: lun
*
* Uses the ISR, but with special processing.
* MUST be single-threaded.
*
*/
static void
mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
{
INTERNAL_CMD iocmd;
INTERNAL_CMD iocmd;
/* Following parameters will not change
* in this routine.
@@ -4018,1633 +3414,11 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
iocmd.id = vdevice->target_id;
iocmd.lun = (u8)vdevice->lun;
if ((vdevice->vtarget->type & TYPE_DISK) &&
if ((vdevice->vtarget->type == TYPE_DISK) &&
(vdevice->configured_lun))
mptscsih_do_cmd(hd, &iocmd);
}
/* Search IOC page 3 to determine if this is hidden physical disk
*/
static int
mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id)
{
int i;
if (!ioc->raid_data.isRaid || !ioc->raid_data.pIocPg3)
return 0;
for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
if (id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID)
return 1;
}
return 0;
}
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_domainValidation - Top level handler for domain validation.
* @hd: Pointer to MPT_SCSI_HOST structure.
*
* Uses the ISR, but with special processing.
* Called from schedule, should not be in interrupt mode.
* While thread alive, do dv for all devices needing dv
*
* Return: None.
*/
static void
mptscsih_domainValidation(void *arg)
{
MPT_SCSI_HOST *hd;
MPT_ADAPTER *ioc;
unsigned long flags;
int id, maxid, dvStatus, did;
int ii, isPhysDisk;
spin_lock_irqsave(&dvtaskQ_lock, flags);
dvtaskQ_active = 1;
if (dvtaskQ_release) {
dvtaskQ_active = 0;
spin_unlock_irqrestore(&dvtaskQ_lock, flags);
return;
}
spin_unlock_irqrestore(&dvtaskQ_lock, flags);
/* For this ioc, loop through all devices and do dv to each device.
* When complete with this ioc, search through the ioc list, and
* for each scsi ioc found, do dv for all devices. Exit when no
* device needs dv.
*/
did = 1;
while (did) {
did = 0;
list_for_each_entry(ioc, &ioc_list, list) {
spin_lock_irqsave(&dvtaskQ_lock, flags);
if (dvtaskQ_release) {
dvtaskQ_active = 0;
spin_unlock_irqrestore(&dvtaskQ_lock, flags);
return;
}
spin_unlock_irqrestore(&dvtaskQ_lock, flags);
msleep(250);
/* DV only to SPI adapters */
if (ioc->bus_type != SPI)
continue;
/* Make sure everything looks ok */
if (ioc->sh == NULL)
continue;
hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
if (hd == NULL)
continue;
if ((ioc->spi_data.forceDv & MPT_SCSICFG_RELOAD_IOC_PG3) != 0) {
mpt_read_ioc_pg_3(ioc);
if (ioc->raid_data.pIocPg3) {
Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
while (numPDisk) {
if (ioc->spi_data.dvStatus[pPDisk->PhysDiskID] & MPT_SCSICFG_DV_NOT_DONE)
ioc->spi_data.dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
pPDisk++;
numPDisk--;
}
}
ioc->spi_data.forceDv &= ~MPT_SCSICFG_RELOAD_IOC_PG3;
}
maxid = min_t(int, ioc->sh->max_id, MPT_MAX_SCSI_DEVICES);
for (id = 0; id < maxid; id++) {
spin_lock_irqsave(&dvtaskQ_lock, flags);
if (dvtaskQ_release) {
dvtaskQ_active = 0;
spin_unlock_irqrestore(&dvtaskQ_lock, flags);
return;
}
spin_unlock_irqrestore(&dvtaskQ_lock, flags);
dvStatus = hd->ioc->spi_data.dvStatus[id];
if (dvStatus & MPT_SCSICFG_NEED_DV) {
did++;
hd->ioc->spi_data.dvStatus[id] |= MPT_SCSICFG_DV_PENDING;
hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_NEED_DV;
msleep(250);
/* If hidden phys disk, block IO's to all
* raid volumes
* else, process normally
*/
isPhysDisk = mptscsih_is_phys_disk(ioc, id);
if (isPhysDisk) {
for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
if (hd->ioc->raid_data.isRaid & (1 << ii)) {
hd->ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_DV_PENDING;
}
}
}
if(mpt_alt_ioc_wait(hd->ioc)!=0) {
ddvprintk((MYIOC_s_WARN_FMT "alt_ioc busy!\n",
hd->ioc->name));
continue;
}
if (mptscsih_doDv(hd, 0, id) == 1) {
/* Untagged device was busy, try again
*/
hd->ioc->spi_data.dvStatus[id] |= MPT_SCSICFG_NEED_DV;
hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_PENDING;
} else {
/* DV is complete. Clear flags.
*/
hd->ioc->spi_data.dvStatus[id] &= ~(MPT_SCSICFG_DV_NOT_DONE | MPT_SCSICFG_DV_PENDING);
}
spin_lock(&hd->ioc->initializing_hba_lock);
hd->ioc->initializing_hba_lock_flag=0;
spin_unlock(&hd->ioc->initializing_hba_lock);
if (isPhysDisk) {
for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
if (hd->ioc->raid_data.isRaid & (1 << ii)) {
hd->ioc->spi_data.dvStatus[ii] &= ~MPT_SCSICFG_DV_PENDING;
}
}
}
if (hd->ioc->spi_data.noQas)
mptscsih_qas_check(hd, id);
}
}
}
}
spin_lock_irqsave(&dvtaskQ_lock, flags);
dvtaskQ_active = 0;
spin_unlock_irqrestore(&dvtaskQ_lock, flags);
return;
}
/* Write SDP1 if no QAS has been enabled
*/
static void
mptscsih_qas_check(MPT_SCSI_HOST *hd, int id)
{
VirtTarget *vtarget;
int ii;
if (hd->Targets == NULL)
return;
for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
if (ii == id)
continue;
if ((hd->ioc->spi_data.dvStatus[ii] & MPT_SCSICFG_DV_NOT_DONE) != 0)
continue;
vtarget = hd->Targets[ii];
if ((vtarget != NULL) && (!vtarget->raidVolume)) {
if ((vtarget->negoFlags & hd->ioc->spi_data.noQas) == 0) {
vtarget->negoFlags |= hd->ioc->spi_data.noQas;
dnegoprintk(("writeSDP1: id=%d flags=0\n", id));
mptscsih_writeSDP1(hd, 0, ii, 0);
}
} else {
if (mptscsih_is_phys_disk(hd->ioc, ii) == 1) {
dnegoprintk(("writeSDP1: id=%d SCSICFG_USE_NVRAM\n", id));
mptscsih_writeSDP1(hd, 0, ii, MPT_SCSICFG_USE_NVRAM);
}
}
}
return;
}
#define MPT_GET_NVRAM_VALS 0x01
#define MPT_UPDATE_MAX 0x02
#define MPT_SET_MAX 0x04
#define MPT_SET_MIN 0x08
#define MPT_FALLBACK 0x10
#define MPT_SAVE 0x20
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptscsih_doDv - Perform domain validation to a target.
* @hd: Pointer to MPT_SCSI_HOST structure.
* @portnum: IOC port number.
* @target: Physical ID of this target
*
* Uses the ISR, but with special processing.
* MUST be single-threaded.
* Test will exit if target is at async & narrow.
*
* Return: None.
*/
static int
mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
{
MPT_ADAPTER *ioc = hd->ioc;
VirtTarget *vtarget;
SCSIDevicePage1_t *pcfg1Data;
SCSIDevicePage0_t *pcfg0Data;
u8 *pbuf1;
u8 *pbuf2;
u8 *pDvBuf;
dma_addr_t dvbuf_dma = -1;
dma_addr_t buf1_dma = -1;
dma_addr_t buf2_dma = -1;
dma_addr_t cfg1_dma_addr = -1;
dma_addr_t cfg0_dma_addr = -1;
ConfigPageHeader_t header1;
ConfigPageHeader_t header0;
DVPARAMETERS dv;
INTERNAL_CMD iocmd;
CONFIGPARMS cfg;
int dv_alloc = 0;
int rc, sz = 0;
int bufsize = 0;
int dataBufSize = 0;
int echoBufSize = 0;
int notDone;
int patt;
int repeat;
int retcode = 0;
int nfactor = MPT_ULTRA320;
char firstPass = 1;
char doFallback = 0;
char readPage0;
char bus, lun;
char inq0 = 0;
if (ioc->spi_data.sdp1length == 0)
return 0;
if (ioc->spi_data.sdp0length == 0)
return 0;
/* If multiple buses are used, require that the initiator
* id be the same on all buses.
*/
if (id == ioc->pfacts[0].PortSCSIID)
return 0;
lun = 0;
bus = (u8) bus_number;
ddvtprintk((MYIOC_s_NOTE_FMT
"DV started: bus=%d, id=%d dv @ %p\n",
ioc->name, bus, id, &dv));
/* Prep DV structure
*/
memset (&dv, 0, sizeof(DVPARAMETERS));
dv.id = id;
/* Populate tmax with the current maximum
* transfer parameters for this target.
* Exit if narrow and async.
*/
dv.cmd = MPT_GET_NVRAM_VALS;
mptscsih_dv_parms(hd, &dv, NULL);
/* Prep SCSI IO structure
*/
iocmd.id = id;
iocmd.bus = bus;
iocmd.lun = lun;
iocmd.flags = 0;
iocmd.physDiskNum = -1;
iocmd.rsvd = iocmd.rsvd2 = 0;
vtarget = hd->Targets[id];
/* Use tagged commands if possible.
*/
if (vtarget) {
if (vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
iocmd.flags |= MPT_ICFLAG_TAGGED_CMD;
else {
if (hd->ioc->facts.FWVersion.Word < 0x01000600)
return 0;
if ((hd->ioc->facts.FWVersion.Word >= 0x01010000) &&
(hd->ioc->facts.FWVersion.Word < 0x01010B00))
return 0;
}
}
/* Prep cfg structure
*/
cfg.pageAddr = (bus<<8) | id;
cfg.cfghdr.hdr = NULL;
/* Prep SDP0 header
*/
header0.PageVersion = ioc->spi_data.sdp0version;
header0.PageLength = ioc->spi_data.sdp0length;
header0.PageNumber = 0;
header0.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
/* Prep SDP1 header
*/
header1.PageVersion = ioc->spi_data.sdp1version;
header1.PageLength = ioc->spi_data.sdp1length;
header1.PageNumber = 1;
header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
if (header0.PageLength & 1)
dv_alloc = (header0.PageLength * 4) + 4;
dv_alloc += (2048 + (header1.PageLength * 4));
pDvBuf = pci_alloc_consistent(ioc->pcidev, dv_alloc, &dvbuf_dma);
if (pDvBuf == NULL)
return 0;
sz = 0;
pbuf1 = (u8 *)pDvBuf;
buf1_dma = dvbuf_dma;
sz +=1024;
pbuf2 = (u8 *) (pDvBuf + sz);
buf2_dma = dvbuf_dma + sz;
sz +=1024;
pcfg0Data = (SCSIDevicePage0_t *) (pDvBuf + sz);
cfg0_dma_addr = dvbuf_dma + sz;
sz += header0.PageLength * 4;
/* 8-byte alignment
*/
if (header0.PageLength & 1)
sz += 4;
pcfg1Data = (SCSIDevicePage1_t *) (pDvBuf + sz);
cfg1_dma_addr = dvbuf_dma + sz;
/* Skip this ID? Set cfg.cfghdr.hdr to force config page write
*/
{
SpiCfgData *pspi_data = &hd->ioc->spi_data;
if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) {
/* Set the factor from nvram */
nfactor = (pspi_data->nvram[id] & MPT_NVRAM_SYNC_MASK) >> 8;
if (nfactor < pspi_data->minSyncFactor )
nfactor = pspi_data->minSyncFactor;
if (!(pspi_data->nvram[id] & MPT_NVRAM_ID_SCAN_ENABLE) ||
(pspi_data->PortFlags == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) ) {
ddvprintk((MYIOC_s_NOTE_FMT "DV Skipped: bus, id, lun (%d, %d, %d)\n",
ioc->name, bus, id, lun));
dv.cmd = MPT_SET_MAX;
mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
cfg.cfghdr.hdr = &header1;
/* Save the final negotiated settings to
* SCSI device page 1.
*/
cfg.physAddr = cfg1_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1;
mpt_config(hd->ioc, &cfg);
goto target_done;
}
}
}
/* Finish iocmd initialization - hidden or visible disk? */
if (ioc->raid_data.pIocPg3) {
/* Search IOC page 3 for matching id
*/
Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
while (numPDisk) {
if (pPDisk->PhysDiskID == id) {
/* match */
iocmd.flags |= MPT_ICFLAG_PHYS_DISK;
iocmd.physDiskNum = pPDisk->PhysDiskNum;
/* Quiesce the IM
*/
if (mptscsih_do_raid(hd, MPI_RAID_ACTION_QUIESCE_PHYS_IO, &iocmd) < 0) {
ddvprintk((MYIOC_s_ERR_FMT "RAID Queisce FAILED!\n", ioc->name));
goto target_done;
}
break;
}
pPDisk++;
numPDisk--;
}
}
/* RAID Volume ID's may double for a physical device. If RAID but
* not a physical ID as well, skip DV.
*/
if ((hd->ioc->raid_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK))
goto target_done;
/* Basic Test.
* Async & Narrow - Inquiry
* Async & Narrow - Inquiry
* Maximum transfer rate - Inquiry
* Compare buffers:
* If compare, test complete.
* If miscompare and first pass, repeat
* If miscompare and not first pass, fall back and repeat
*/
hd->pLocal = NULL;
readPage0 = 0;
sz = SCSI_MAX_INQUIRY_BYTES;
rc = MPT_SCANDV_GOOD;
while (1) {
ddvprintk((MYIOC_s_NOTE_FMT "DV: Start Basic test on id=%d\n", ioc->name, id));
retcode = 0;
dv.cmd = MPT_SET_MIN;
mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
cfg.cfghdr.hdr = &header1;
cfg.physAddr = cfg1_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1;
if (mpt_config(hd->ioc, &cfg) != 0)
goto target_done;
/* Wide - narrow - wide workaround case
*/
if ((rc == MPT_SCANDV_ISSUE_SENSE) && dv.max.width) {
/* Send an untagged command to reset disk Qs corrupted
* when a parity error occurs on a Request Sense.
*/
if ((hd->ioc->facts.FWVersion.Word >= 0x01000600) ||
((hd->ioc->facts.FWVersion.Word >= 0x01010000) &&
(hd->ioc->facts.FWVersion.Word < 0x01010B00)) ) {
iocmd.cmd = REQUEST_SENSE;
iocmd.data_dma = buf1_dma;
iocmd.data = pbuf1;
iocmd.size = 0x12;
if (mptscsih_do_cmd(hd, &iocmd) < 0)
goto target_done;
else {
if (hd->pLocal == NULL)
goto target_done;
rc = hd->pLocal->completion;
if ((rc == MPT_SCANDV_GOOD) || (rc == MPT_SCANDV_SENSE)) {
dv.max.width = 0;
doFallback = 0;
} else
goto target_done;
}
} else
goto target_done;
}
iocmd.cmd = INQUIRY;
iocmd.data_dma = buf1_dma;
iocmd.data = pbuf1;
iocmd.size = sz;
memset(pbuf1, 0x00, sz);
if (mptscsih_do_cmd(hd, &iocmd) < 0)
goto target_done;
else {
if (hd->pLocal == NULL)
goto target_done;
rc = hd->pLocal->completion;
if (rc == MPT_SCANDV_GOOD) {
if (hd->pLocal->scsiStatus == SAM_STAT_BUSY) {
if ((iocmd.flags & MPT_ICFLAG_TAGGED_CMD) == 0)
retcode = 1;
else
retcode = 0;
goto target_done;
}
} else if (rc == MPT_SCANDV_SENSE) {
;
} else {
/* If first command doesn't complete
* with a good status or with a check condition,
* exit.
*/
goto target_done;
}
}
/* Reset the size for disks
*/
inq0 = (*pbuf1) & 0x1F;
if ((inq0 == 0) && vtarget && !vtarget->raidVolume) {
sz = 0x40;
iocmd.size = sz;
}
/* Another GEM workaround. Check peripheral device type,
* if PROCESSOR, quit DV.
*/
if (inq0 == TYPE_PROCESSOR) {
mptscsih_initTarget(hd,
vtarget,
lun,
pbuf1,
sz);
goto target_done;
}
if (inq0 > 0x08)
goto target_done;
if (mptscsih_do_cmd(hd, &iocmd) < 0)
goto target_done;
if (sz == 0x40) {
if ((vtarget->maxWidth == 1) && (vtarget->maxOffset) && (nfactor < 0x0A)
&& (vtarget->minSyncFactor > 0x09)) {
if ((pbuf1[56] & 0x04) == 0)
;
else if ((pbuf1[56] & 0x01) == 1) {
vtarget->minSyncFactor =
nfactor > MPT_ULTRA320 ? nfactor : MPT_ULTRA320;
} else {
vtarget->minSyncFactor =
nfactor > MPT_ULTRA160 ? nfactor : MPT_ULTRA160;
}
dv.max.factor = vtarget->minSyncFactor;
if ((pbuf1[56] & 0x02) == 0) {
vtarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS;
ddvprintk((MYIOC_s_NOTE_FMT
"DV: Start Basic noQas on id=%d due to pbuf1[56]=%x\n",
ioc->name, id, pbuf1[56]));
}
}
}
if (doFallback)
dv.cmd = MPT_FALLBACK;
else
dv.cmd = MPT_SET_MAX;
mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
if (mpt_config(hd->ioc, &cfg) != 0)
goto target_done;
if ((!dv.now.width) && (!dv.now.offset))
goto target_done;
iocmd.cmd = INQUIRY;
iocmd.data_dma = buf2_dma;
iocmd.data = pbuf2;
iocmd.size = sz;
memset(pbuf2, 0x00, sz);
if (mptscsih_do_cmd(hd, &iocmd) < 0)
goto target_done;
else if (hd->pLocal == NULL)
goto target_done;
else {
/* Save the return code.
* If this is the first pass,
* read SCSI Device Page 0
* and update the target max parameters.
*/
rc = hd->pLocal->completion;
doFallback = 0;
if (rc == MPT_SCANDV_GOOD) {
if (!readPage0) {
u32 sdp0_info;
u32 sdp0_nego;
cfg.cfghdr.hdr = &header0;
cfg.physAddr = cfg0_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.dir = 0;
if (mpt_config(hd->ioc, &cfg) != 0)
goto target_done;
sdp0_info = le32_to_cpu(pcfg0Data->Information) & 0x0E;
sdp0_nego = (le32_to_cpu(pcfg0Data->NegotiatedParameters) & 0xFF00 ) >> 8;
/* Quantum and Fujitsu workarounds.
* Quantum: PPR U320 -> PPR reply with Ultra2 and wide
* Fujitsu: PPR U320 -> Msg Reject and Ultra2 and wide
* Restart with a request for U160.
*/
if ((dv.now.factor == MPT_ULTRA320) && (sdp0_nego == MPT_ULTRA2)) {
doFallback = 1;
} else {
dv.cmd = MPT_UPDATE_MAX;
mptscsih_dv_parms(hd, &dv, (void *)pcfg0Data);
/* Update the SCSI device page 1 area
*/
pcfg1Data->RequestedParameters = pcfg0Data->NegotiatedParameters;
readPage0 = 1;
}
}
/* Quantum workaround. Restart this test with the fallback
* flag set.
*/
if (doFallback == 0) {
if (memcmp(pbuf1, pbuf2, sz) != 0) {
if (!firstPass)
doFallback = 1;
} else {
ddvprintk((MYIOC_s_NOTE_FMT
"DV:Inquiry compared id=%d, calling initTarget\n", ioc->name, id));
hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_NOT_DONE;
mptscsih_initTarget(hd,
vtarget,
lun,
pbuf1,
sz);
break; /* test complete */
}
}
} else if (rc == MPT_SCANDV_ISSUE_SENSE)
doFallback = 1; /* set fallback flag */
else if ((rc == MPT_SCANDV_DID_RESET) ||
(rc == MPT_SCANDV_SENSE) ||
(rc == MPT_SCANDV_FALLBACK))
doFallback = 1; /* set fallback flag */
else
goto target_done;
firstPass = 0;
}
}
ddvprintk((MYIOC_s_NOTE_FMT "DV: Basic test on id=%d completed OK.\n", ioc->name, id));
if (ioc->spi_data.mpt_dv == 0)
goto target_done;
inq0 = (*pbuf1) & 0x1F;
/* Continue only for disks
*/
if (inq0 != 0)
goto target_done;
if ( ioc->spi_data.PortFlags == MPI_SCSIPORTPAGE2_PORT_FLAGS_BASIC_DV_ONLY )
goto target_done;
/* Start the Enhanced Test.
* 0) issue TUR to clear out check conditions
* 1) read capacity of echo (regular) buffer
* 2) reserve device
* 3) do write-read-compare data pattern test
* 4) release
* 5) update nego parms to target struct
*/
cfg.cfghdr.hdr = &header1;
cfg.physAddr = cfg1_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1;
iocmd.cmd = TEST_UNIT_READY;
iocmd.data_dma = -1;
iocmd.data = NULL;
iocmd.size = 0;
notDone = 1;
while (notDone) {
if (mptscsih_do_cmd(hd, &iocmd) < 0)
goto target_done;
if (hd->pLocal == NULL)
goto target_done;
rc = hd->pLocal->completion;
if (rc == MPT_SCANDV_GOOD)
notDone = 0;
else if (rc == MPT_SCANDV_SENSE) {
u8 skey = hd->pLocal->sense[2] & 0x0F;
u8 asc = hd->pLocal->sense[12];
u8 ascq = hd->pLocal->sense[13];
ddvprintk((MYIOC_s_INFO_FMT
"SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n",
ioc->name, skey, asc, ascq));
if (skey == UNIT_ATTENTION)
notDone++; /* repeat */
else if ((skey == NOT_READY) &&
(asc == 0x04)&&(ascq == 0x01)) {
/* wait then repeat */
mdelay (2000);
notDone++;
} else if ((skey == NOT_READY) && (asc == 0x3A)) {
/* no medium, try read test anyway */
notDone = 0;
} else {
/* All other errors are fatal.
*/
ddvprintk((MYIOC_s_INFO_FMT "DV: fatal error.",
ioc->name));
goto target_done;
}
} else
goto target_done;
}
iocmd.cmd = READ_BUFFER;
iocmd.data_dma = buf1_dma;
iocmd.data = pbuf1;
iocmd.size = 4;
iocmd.flags |= MPT_ICFLAG_BUF_CAP;
dataBufSize = 0;
echoBufSize = 0;
for (patt = 0; patt < 2; patt++) {
if (patt == 0)
iocmd.flags |= MPT_ICFLAG_ECHO;
else
iocmd.flags &= ~MPT_ICFLAG_ECHO;
notDone = 1;
while (notDone) {
bufsize = 0;
/* If not ready after 8 trials,
* give up on this device.
*/
if (notDone > 8)
goto target_done;
if (mptscsih_do_cmd(hd, &iocmd) < 0)
goto target_done;
else if (hd->pLocal == NULL)
goto target_done;
else {
rc = hd->pLocal->completion;
ddvprintk(("ReadBuffer Comp Code %d", rc));
ddvprintk((" buff: %0x %0x %0x %0x\n",
pbuf1[0], pbuf1[1], pbuf1[2], pbuf1[3]));
if (rc == MPT_SCANDV_GOOD) {
notDone = 0;
if (iocmd.flags & MPT_ICFLAG_ECHO) {
bufsize = ((pbuf1[2] & 0x1F) <<8) | pbuf1[3];
if (pbuf1[0] & 0x01)
iocmd.flags |= MPT_ICFLAG_EBOS;
} else {
bufsize = pbuf1[1]<<16 | pbuf1[2]<<8 | pbuf1[3];
}
} else if (rc == MPT_SCANDV_SENSE) {
u8 skey = hd->pLocal->sense[2] & 0x0F;
u8 asc = hd->pLocal->sense[12];
u8 ascq = hd->pLocal->sense[13];
ddvprintk((MYIOC_s_INFO_FMT
"SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n",
ioc->name, skey, asc, ascq));
if (skey == ILLEGAL_REQUEST) {
notDone = 0;
} else if (skey == UNIT_ATTENTION) {
notDone++; /* repeat */
} else if ((skey == NOT_READY) &&
(asc == 0x04)&&(ascq == 0x01)) {
/* wait then repeat */
mdelay (2000);
notDone++;
} else {
/* All other errors are fatal.
*/
ddvprintk((MYIOC_s_INFO_FMT "DV: fatal error.",
ioc->name));
goto target_done;
}
} else {
/* All other errors are fatal
*/
goto target_done;
}
}
}
if (iocmd.flags & MPT_ICFLAG_ECHO)
echoBufSize = bufsize;
else
dataBufSize = bufsize;
}
sz = 0;
iocmd.flags &= ~MPT_ICFLAG_BUF_CAP;
/* Use echo buffers if possible,
* Exit if both buffers are 0.
*/
if (echoBufSize > 0) {
iocmd.flags |= MPT_ICFLAG_ECHO;
if (dataBufSize > 0)
bufsize = min(echoBufSize, dataBufSize);
else
bufsize = echoBufSize;
} else if (dataBufSize == 0)
goto target_done;
ddvprintk((MYIOC_s_INFO_FMT "%s Buffer Capacity %d\n", ioc->name,
(iocmd.flags & MPT_ICFLAG_ECHO) ? "Echo" : " ", bufsize));
/* Data buffers for write-read-compare test max 1K.
*/
sz = min(bufsize, 1024);
/* --- loop ----
* On first pass, always issue a reserve.
* On additional loops, only if a reset has occurred.
* iocmd.flags indicates if echo or regular buffer
*/
for (patt = 0; patt < 4; patt++) {
ddvprintk(("Pattern %d\n", patt));
if ((iocmd.flags & MPT_ICFLAG_RESERVED) && (iocmd.flags & MPT_ICFLAG_DID_RESET)) {
iocmd.cmd = TEST_UNIT_READY;
iocmd.data_dma = -1;
iocmd.data = NULL;
iocmd.size = 0;
if (mptscsih_do_cmd(hd, &iocmd) < 0)
goto target_done;
iocmd.cmd = RELEASE;
iocmd.data_dma = -1;
iocmd.data = NULL;
iocmd.size = 0;
if (mptscsih_do_cmd(hd, &iocmd) < 0)
goto target_done;
else if (hd->pLocal == NULL)
goto target_done;
else {
rc = hd->pLocal->completion;
ddvprintk(("Release rc %d\n", rc));
if (rc == MPT_SCANDV_GOOD)
iocmd.flags &= ~MPT_ICFLAG_RESERVED;
else
goto target_done;
}
iocmd.flags &= ~MPT_ICFLAG_RESERVED;
}
iocmd.flags &= ~MPT_ICFLAG_DID_RESET;
if (iocmd.flags & MPT_ICFLAG_EBOS)
goto skip_Reserve;
repeat = 5;
while (repeat && (!(iocmd.flags & MPT_ICFLAG_RESERVED))) {
iocmd.cmd = RESERVE;
iocmd.data_dma = -1;
iocmd.data = NULL;
iocmd.size = 0;
if (mptscsih_do_cmd(hd, &iocmd) < 0)
goto target_done;
else if (hd->pLocal == NULL)
goto target_done;
else {
rc = hd->pLocal->completion;
if (rc == MPT_SCANDV_GOOD) {
iocmd.flags |= MPT_ICFLAG_RESERVED;
} else if (rc == MPT_SCANDV_SENSE) {
/* Wait if coming ready
*/
u8 skey = hd->pLocal->sense[2] & 0x0F;
u8 asc = hd->pLocal->sense[12];
u8 ascq = hd->pLocal->sense[13];
ddvprintk((MYIOC_s_INFO_FMT
"DV: Reserve Failed: ", ioc->name));
ddvprintk(("SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n",
skey, asc, ascq));
if ((skey == NOT_READY) && (asc == 0x04)&&
(ascq == 0x01)) {
/* wait then repeat */
mdelay (2000);
notDone++;
} else {
ddvprintk((MYIOC_s_INFO_FMT
"DV: Reserved Failed.", ioc->name));
goto target_done;
}
} else {
ddvprintk((MYIOC_s_INFO_FMT "DV: Reserved Failed.",
ioc->name));
goto target_done;
}
}
}
skip_Reserve:
mptscsih_fillbuf(pbuf1, sz, patt, 1);
iocmd.cmd = WRITE_BUFFER;
iocmd.data_dma = buf1_dma;
iocmd.data = pbuf1;
iocmd.size = sz;
if (mptscsih_do_cmd(hd, &iocmd) < 0)
goto target_done;
else if (hd->pLocal == NULL)
goto target_done;
else {
rc = hd->pLocal->completion;
if (rc == MPT_SCANDV_GOOD)
; /* Issue read buffer */
else if (rc == MPT_SCANDV_DID_RESET) {
/* If using echo buffers, reset to data buffers.
* Else do Fallback and restart
* this test (re-issue reserve
* because of bus reset).
*/
if ((iocmd.flags & MPT_ICFLAG_ECHO) && (dataBufSize >= bufsize)) {
iocmd.flags &= ~MPT_ICFLAG_ECHO;
} else {
dv.cmd = MPT_FALLBACK;
mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
if (mpt_config(hd->ioc, &cfg) != 0)
goto target_done;
if ((!dv.now.width) && (!dv.now.offset))
goto target_done;
}
iocmd.flags |= MPT_ICFLAG_DID_RESET;
patt = -1;
continue;
} else if (rc == MPT_SCANDV_SENSE) {
/* Restart data test if UA, else quit.
*/
u8 skey = hd->pLocal->sense[2] & 0x0F;
ddvprintk((MYIOC_s_INFO_FMT
"SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", ioc->name, skey,
hd->pLocal->sense[12], hd->pLocal->sense[13]));
if (skey == UNIT_ATTENTION) {
patt = -1;
continue;
} else if (skey == ILLEGAL_REQUEST) {
if (iocmd.flags & MPT_ICFLAG_ECHO) {
if (dataBufSize >= bufsize) {
iocmd.flags &= ~MPT_ICFLAG_ECHO;
patt = -1;
continue;
}
}
goto target_done;
}
else
goto target_done;
} else {
/* fatal error */
goto target_done;
}
}
iocmd.cmd = READ_BUFFER;
iocmd.data_dma = buf2_dma;
iocmd.data = pbuf2;
iocmd.size = sz;
if (mptscsih_do_cmd(hd, &iocmd) < 0)
goto target_done;
else if (hd->pLocal == NULL)
goto target_done;
else {
rc = hd->pLocal->completion;
if (rc == MPT_SCANDV_GOOD) {
/* If buffers compare,
* go to next pattern,
* else, do a fallback and restart
* data transfer test.
*/
if (memcmp (pbuf1, pbuf2, sz) == 0) {
; /* goto next pattern */
} else {
/* Miscompare with Echo buffer, go to data buffer,
* if that buffer exists.
* Miscompare with Data buffer, check first 4 bytes,
* some devices return capacity. Exit in this case.
*/
if (iocmd.flags & MPT_ICFLAG_ECHO) {
if (dataBufSize >= bufsize)
iocmd.flags &= ~MPT_ICFLAG_ECHO;
else
goto target_done;
} else {
if (dataBufSize == (pbuf2[1]<<16 | pbuf2[2]<<8 | pbuf2[3])) {
/* Argh. Device returning wrong data.
* Quit DV for this device.
*/
goto target_done;
}
/* Had an actual miscompare. Slow down.*/
dv.cmd = MPT_FALLBACK;
mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
if (mpt_config(hd->ioc, &cfg) != 0)
goto target_done;
if ((!dv.now.width) && (!dv.now.offset))
goto target_done;
}
patt = -1;
continue;
}
} else if (rc == MPT_SCANDV_DID_RESET) {
/* Do Fallback and restart
* this test (re-issue reserve
* because of bus reset).
*/
dv.cmd = MPT_FALLBACK;
mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
if (mpt_config(hd->ioc, &cfg) != 0)
goto target_done;
if ((!dv.now.width) && (!dv.now.offset))
goto target_done;
iocmd.flags |= MPT_ICFLAG_DID_RESET;
patt = -1;
continue;
} else if (rc == MPT_SCANDV_SENSE) {
/* Restart data test if UA, else quit.
*/
u8 skey = hd->pLocal->sense[2] & 0x0F;
ddvprintk((MYIOC_s_INFO_FMT
"SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", ioc->name, skey,
hd->pLocal->sense[12], hd->pLocal->sense[13]));
if (skey == UNIT_ATTENTION) {
patt = -1;
continue;
}
else
goto target_done;
} else {
/* fatal error */
goto target_done;
}
}
} /* --- end of patt loop ---- */
target_done:
if (iocmd.flags & MPT_ICFLAG_RESERVED) {
iocmd.cmd = RELEASE;
iocmd.data_dma = -1;
iocmd.data = NULL;
iocmd.size = 0;
if (mptscsih_do_cmd(hd, &iocmd) < 0)
printk(MYIOC_s_INFO_FMT "DV: Release failed. id %d",
ioc->name, id);
else if (hd->pLocal) {
if (hd->pLocal->completion == MPT_SCANDV_GOOD)
iocmd.flags &= ~MPT_ICFLAG_RESERVED;
} else {
printk(MYIOC_s_INFO_FMT "DV: Release failed. id %d",
ioc->name, id);
}
}
/* Set if cfg1_dma_addr contents is valid
*/
if ((cfg.cfghdr.hdr != NULL) && (retcode == 0)){
/* If disk, not U320, disable QAS
*/
if ((inq0 == 0) && (dv.now.factor > MPT_ULTRA320)) {
hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS;
ddvprintk((MYIOC_s_NOTE_FMT
"noQas set due to id=%d has factor=%x\n", ioc->name, id, dv.now.factor));
}
dv.cmd = MPT_SAVE;
mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
/* Double writes to SDP1 can cause problems,
* skip save of the final negotiated settings to
* SCSI device page 1.
*
cfg.cfghdr.hdr = &header1;
cfg.physAddr = cfg1_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1;
mpt_config(hd->ioc, &cfg);
*/
}
/* If this is a RAID Passthrough, enable internal IOs
*/
if (iocmd.flags & MPT_ICFLAG_PHYS_DISK) {
if (mptscsih_do_raid(hd, MPI_RAID_ACTION_ENABLE_PHYS_IO, &iocmd) < 0)
ddvprintk((MYIOC_s_ERR_FMT "RAID Enable FAILED!\n", ioc->name));
}
/* Done with the DV scan of the current target
*/
if (pDvBuf)
pci_free_consistent(ioc->pcidev, dv_alloc, pDvBuf, dvbuf_dma);
ddvtprintk((MYIOC_s_INFO_FMT "DV Done id=%d\n",
ioc->name, id));
return retcode;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptscsih_dv_parms - perform a variety of operations on the
* parameters used for negotiation.
* @hd: Pointer to a SCSI host.
* @dv: Pointer to a structure that contains the maximum and current
* negotiated parameters.
*/
static void
mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
{
VirtTarget *vtarget;
SCSIDevicePage0_t *pPage0;
SCSIDevicePage1_t *pPage1;
int val = 0, data, configuration;
u8 width = 0;
u8 offset = 0;
u8 factor = 0;
u8 negoFlags = 0;
u8 cmd = dv->cmd;
u8 id = dv->id;
switch (cmd) {
case MPT_GET_NVRAM_VALS:
ddvprintk((MYIOC_s_NOTE_FMT "Getting NVRAM: ",
hd->ioc->name));
/* Get the NVRAM values and save in tmax
* If not an LVD bus, the adapter minSyncFactor has already
* been throttled back.
*/
negoFlags = hd->ioc->spi_data.noQas;
if ((hd->Targets)&&((vtarget = hd->Targets[(int)id]) != NULL) && !vtarget->raidVolume) {
width = vtarget->maxWidth;
offset = vtarget->maxOffset;
factor = vtarget->minSyncFactor;
negoFlags |= vtarget->negoFlags;
} else {
if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
data = hd->ioc->spi_data.nvram[id];
width = data & MPT_NVRAM_WIDE_DISABLE ? 0 : 1;
if ((offset = hd->ioc->spi_data.maxSyncOffset) == 0)
factor = MPT_ASYNC;
else {
factor = (data & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT;
if ((factor == 0) || (factor == MPT_ASYNC)){
factor = MPT_ASYNC;
offset = 0;
}
}
} else {
width = MPT_NARROW;
offset = 0;
factor = MPT_ASYNC;
}
/* Set the negotiation flags */
if (!width)
negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
if (!offset)
negoFlags |= MPT_TARGET_NO_NEGO_SYNC;
}
/* limit by adapter capabilities */
width = min(width, hd->ioc->spi_data.maxBusWidth);
offset = min(offset, hd->ioc->spi_data.maxSyncOffset);
factor = max(factor, hd->ioc->spi_data.minSyncFactor);
/* Check Consistency */
if (offset && (factor < MPT_ULTRA2) && !width)
factor = MPT_ULTRA2;
dv->max.width = width;
dv->max.offset = offset;
dv->max.factor = factor;
dv->max.flags = negoFlags;
ddvprintk((" id=%d width=%d factor=%x offset=%x flags=%x\n",
id, width, factor, offset, negoFlags));
break;
case MPT_UPDATE_MAX:
ddvprintk((MYIOC_s_NOTE_FMT
"Updating with SDP0 Data: ", hd->ioc->name));
/* Update tmax values with those from Device Page 0.*/
pPage0 = (SCSIDevicePage0_t *) pPage;
if (pPage0) {
val = le32_to_cpu(pPage0->NegotiatedParameters);
dv->max.width = val & MPI_SCSIDEVPAGE0_NP_WIDE ? 1 : 0;
dv->max.offset = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >> 16;
dv->max.factor = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8;
}
dv->now.width = dv->max.width;
dv->now.offset = dv->max.offset;
dv->now.factor = dv->max.factor;
ddvprintk(("id=%d width=%d factor=%x offset=%x flags=%x\n",
id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags));
break;
case MPT_SET_MAX:
ddvprintk((MYIOC_s_NOTE_FMT "Setting Max: ",
hd->ioc->name));
/* Set current to the max values. Update the config page.*/
dv->now.width = dv->max.width;
dv->now.offset = dv->max.offset;
dv->now.factor = dv->max.factor;
dv->now.flags = dv->max.flags;
pPage1 = (SCSIDevicePage1_t *)pPage;
if (pPage1) {
mptscsih_setDevicePage1Flags (dv->now.width, dv->now.factor,
dv->now.offset, &val, &configuration, dv->now.flags);
dnegoprintk(("Setting Max: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration));
pPage1->RequestedParameters = cpu_to_le32(val);
pPage1->Reserved = 0;
pPage1->Configuration = cpu_to_le32(configuration);
}
ddvprintk(("id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x configuration=%x\n",
id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration));
break;
case MPT_SET_MIN:
ddvprintk((MYIOC_s_NOTE_FMT "Setting Min: ",
hd->ioc->name));
/* Set page to asynchronous and narrow
* Do not update now, breaks fallback routine. */
width = MPT_NARROW;
offset = 0;
factor = MPT_ASYNC;
negoFlags = dv->max.flags;
pPage1 = (SCSIDevicePage1_t *)pPage;
if (pPage1) {
mptscsih_setDevicePage1Flags (width, factor,
offset, &val, &configuration, negoFlags);
dnegoprintk(("Setting Min: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
id, width, factor, offset, negoFlags, val, configuration));
pPage1->RequestedParameters = cpu_to_le32(val);
pPage1->Reserved = 0;
pPage1->Configuration = cpu_to_le32(configuration);
}
ddvprintk(("id=%d width=%d factor=%x offset=%x request=%x config=%x negoFlags=%x\n",
id, width, factor, offset, val, configuration, negoFlags));
break;
case MPT_FALLBACK:
ddvprintk((MYIOC_s_NOTE_FMT
"Fallback: Start: offset %d, factor %x, width %d \n",
hd->ioc->name, dv->now.offset,
dv->now.factor, dv->now.width));
width = dv->now.width;
offset = dv->now.offset;
factor = dv->now.factor;
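/* Fall back one step at a time: alternately drop the bus width and
 * step the sync factor down (U320, U160, U2, Ultra, Fast, SCSI),
 * ending at async narrow if nothing faster works.
 */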
if ((offset) && (dv->max.width)) {
if (factor < MPT_ULTRA160)
factor = MPT_ULTRA160;
else if (factor < MPT_ULTRA2) {
factor = MPT_ULTRA2;
width = MPT_WIDE;
} else if ((factor == MPT_ULTRA2) && width) {
factor = MPT_ULTRA2;
width = MPT_NARROW;
} else if (factor < MPT_ULTRA) {
factor = MPT_ULTRA;
width = MPT_WIDE;
} else if ((factor == MPT_ULTRA) && width) {
width = MPT_NARROW;
} else if (factor < MPT_FAST) {
factor = MPT_FAST;
width = MPT_WIDE;
} else if ((factor == MPT_FAST) && width) {
factor = MPT_FAST;
width = MPT_NARROW;
} else if (factor < MPT_SCSI) {
factor = MPT_SCSI;
width = MPT_WIDE;
} else if ((factor == MPT_SCSI) && width) {
factor = MPT_SCSI;
width = MPT_NARROW;
} else {
factor = MPT_ASYNC;
offset = 0;
}
} else if (offset) {
width = MPT_NARROW;
if (factor < MPT_ULTRA)
factor = MPT_ULTRA;
else if (factor < MPT_FAST)
factor = MPT_FAST;
else if (factor < MPT_SCSI)
factor = MPT_SCSI;
else {
factor = MPT_ASYNC;
offset = 0;
}
} else {
width = MPT_NARROW;
factor = MPT_ASYNC;
}
dv->max.flags |= MPT_TARGET_NO_NEGO_QAS;
dv->max.flags &= ~MPT_TAPE_NEGO_IDP;
dv->now.width = width;
dv->now.offset = offset;
dv->now.factor = factor;
dv->now.flags = dv->max.flags;
pPage1 = (SCSIDevicePage1_t *)pPage;
if (pPage1) {
mptscsih_setDevicePage1Flags (width, factor, offset, &val,
&configuration, dv->now.flags);
dnegoprintk(("Finish: id=%d width=%d offset=%d factor=%x negoFlags=%x request=%x config=%x\n",
id, width, offset, factor, dv->now.flags, val, configuration));
pPage1->RequestedParameters = cpu_to_le32(val);
pPage1->Reserved = 0;
pPage1->Configuration = cpu_to_le32(configuration);
}
ddvprintk(("Finish: id=%d offset=%d factor=%x width=%d request=%x config=%x\n",
id, dv->now.offset, dv->now.factor, dv->now.width, val, configuration));
break;
case MPT_SAVE:
ddvprintk((MYIOC_s_NOTE_FMT
"Saving to Target structure: ", hd->ioc->name));
ddvprintk(("id=%d width=%x factor=%x offset=%d flags=%x\n",
id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags));
/* Save these values to target structures
* or overwrite nvram (phys disks only).
*/
if ((hd->Targets)&&((vtarget = hd->Targets[(int)id]) != NULL) && !vtarget->raidVolume ) {
vtarget->maxWidth = dv->now.width;
vtarget->maxOffset = dv->now.offset;
vtarget->minSyncFactor = dv->now.factor;
vtarget->negoFlags = dv->now.flags;
} else {
/* Preserve all flags, use
* read-modify-write algorithm
*/
if (hd->ioc->spi_data.nvram) {
data = hd->ioc->spi_data.nvram[id];
if (dv->now.width)
data &= ~MPT_NVRAM_WIDE_DISABLE;
else
data |= MPT_NVRAM_WIDE_DISABLE;
if (!dv->now.offset)
factor = MPT_ASYNC;
data &= ~MPT_NVRAM_SYNC_MASK;
data |= (dv->now.factor << MPT_NVRAM_SYNC_SHIFT) & MPT_NVRAM_SYNC_MASK;
hd->ioc->spi_data.nvram[id] = data;
}
}
break;
}
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mptscsih_fillbuf - fill a buffer with a special data pattern.
* For bus scan only.
*
* @buffer: Pointer to data buffer to be filled.
* @size: Number of bytes to fill
* @index: Pattern index
* @width: bus width, 0 (8 bits) or 1 (16 bits)
*/
static void
mptscsih_fillbuf(char *buffer, int size, int index, int width)
{
char *ptr = buffer;
int ii;
char byte;
short val;
switch (index) {
case 0:
if (width) {
/* Pattern: 0000 FFFF 0000 FFFF
*/
for (ii=0; ii < size; ii++, ptr++) {
if (ii & 0x02)
*ptr = 0xFF;
else
*ptr = 0x00;
}
} else {
/* Pattern: 00 FF 00 FF
*/
for (ii=0; ii < size; ii++, ptr++) {
if (ii & 0x01)
*ptr = 0xFF;
else
*ptr = 0x00;
}
}
break;
case 1:
if (width) {
/* Pattern: 5555 AAAA 5555 AAAA 5555
*/
for (ii=0; ii < size; ii++, ptr++) {
if (ii & 0x02)
*ptr = 0xAA;
else
*ptr = 0x55;
}
} else {
/* Pattern: 55 AA 55 AA 55
*/
for (ii=0; ii < size; ii++, ptr++) {
if (ii & 0x01)
*ptr = 0xAA;
else
*ptr = 0x55;
}
}
break;
case 2:
/* Pattern: 00 01 02 03 04 05
* ... FE FF 00 01..
*/
for (ii=0; ii < size; ii++, ptr++)
*ptr = (char) ii;
break;
case 3:
if (width) {
/* Wide Pattern: FFFE 0001 FFFD 0002
* ... 4000 DFFF 8000 EFFF
*/
byte = 0;
for (ii=0; ii < size/2; ii++) {
/* Create the base pattern
*/
val = (1 << byte);
/* every 64 (0x40) bytes flip the pattern
* since we fill 2 bytes / iteration,
* test for ii = 0x20
*/
if (ii & 0x20)
val = ~(val);
if (ii & 0x01) {
*ptr = (char)( (val & 0xFF00) >> 8);
ptr++;
*ptr = (char)(val & 0xFF);
byte++;
byte &= 0x0F;
} else {
val = ~val;
*ptr = (char)( (val & 0xFF00) >> 8);
ptr++;
*ptr = (char)(val & 0xFF);
}
ptr++;
}
} else {
/* Narrow Pattern: FE 01 FD 02 FB 04
* .. 7F 80 01 FE 02 FD ... 80 7F
*/
byte = 0;
for (ii=0; ii < size; ii++, ptr++) {
/* Base pattern - first 32 bytes
*/
if (ii & 0x01) {
*ptr = (1 << byte);
byte++;
byte &= 0x07;
} else {
*ptr = (char) (~(1 << byte));
}
/* Flip the pattern every 32 bytes
*/
if (ii & 0x20)
*ptr = ~(*ptr);
}
}
break;
}
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* If DV is disabled (negoNvram set to USE_NVRAM) or if not LUN 0, return.
* Else set the NEED_DV flag after Read Capacity is issued (disks)
* or Mode Sense (cdroms).
*
* For tapes, initTarget will set this flag on completion of the Inquiry command.
* Called only if the DV_NOT_DONE flag is set.
*/
static void
mptscsih_set_dvflags(MPT_SCSI_HOST *hd, struct scsi_cmnd *sc)
{
MPT_ADAPTER *ioc = hd->ioc;
u8 cmd;
SpiCfgData *pSpi;
ddvtprintk((MYIOC_s_NOTE_FMT
" set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n",
hd->ioc->name, sc->device->id, sc->device->lun , hd->negoNvram, sc->cmnd[0]));
if ((sc->device->lun != 0) || (hd->negoNvram != 0))
return;
cmd = sc->cmnd[0];
if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) {
pSpi = &ioc->spi_data;
if ((ioc->raid_data.isRaid & (1 << sc->device->id)) && ioc->raid_data.pIocPg3) {
/* Set NEED_DV for all hidden disks
*/
Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
while (numPDisk) {
pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID));
pPDisk++;
numPDisk--;
}
}
pSpi->dvStatus[sc->device->id] |= MPT_SCSICFG_NEED_DV;
ddvtprintk(("NEED_DV set for visible disk id %d\n", sc->device->id));
}
}
/* mptscsih_set_dvflags_raid()
*
* New or replaced disk. Set DV flag and schedule DV.
*/
static void
mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id)
{
MPT_ADAPTER *ioc = hd->ioc;
SpiCfgData *pSpi = &ioc->spi_data;
Ioc3PhysDisk_t *pPDisk;
int numPDisk;
if (hd->negoNvram != 0)
return;
ddvtprintk(("DV requested for phys disk id %d\n", id));
if (ioc->raid_data.pIocPg3) {
pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
while (numPDisk) {
if (id == pPDisk->PhysDiskNum) {
pSpi->dvStatus[pPDisk->PhysDiskID] =
(MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
pSpi->forceDv = MPT_SCSICFG_NEED_DV;
ddvtprintk(("NEED_DV set for phys disk id %d\n",
pPDisk->PhysDiskID));
break;
}
pPDisk++;
numPDisk--;
}
if (numPDisk == 0) {
/* The physical disk that needs DV was not found
* in the stored IOC Page 3. The driver must reload
* this page. DV routine will set the NEED_DV flag for
* all phys disks that have DV_NOT_DONE set.
*/
pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n",id));
}
}
}
#endif /* ~MPTSCSIH_ENABLE_DOMAIN_VALIDATION */
EXPORT_SYMBOL(mptscsih_remove);
EXPORT_SYMBOL(mptscsih_shutdown);
#ifdef CONFIG_PM
......
......@@ -60,16 +60,6 @@
#define MPT_SCSI_MAX_SECTORS 8192
/* To disable domain validation, uncomment the
* following line. No effect for FC devices.
* For SCSI devices, driver will negotiate to
* NVRAM settings (if available) or to maximum adapter
* capabilities.
*/
#define MPTSCSIH_ENABLE_DOMAIN_VALIDATION
/* SCSI driver setup structure. Settings can be overridden
* by command line options.
*/
......@@ -109,3 +99,4 @@ extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
extern void mptscsih_timer_expired(unsigned long data);
extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout);
extern int mptscsih_raid_id_to_num(MPT_SCSI_HOST *hd, uint physdiskid);
......@@ -56,12 +56,15 @@
#include <linux/reboot.h> /* notifier code */
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/raid_class.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h>
#include "mptbase.h"
#include "mptscsih.h"
......@@ -76,20 +79,6 @@ MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");
/* Command line args */
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
static int mpt_dv = MPTSCSIH_DOMAIN_VALIDATION;
module_param(mpt_dv, int, 0);
MODULE_PARM_DESC(mpt_dv, " DV Algorithm: enhanced=1, basic=0 (default=MPTSCSIH_DOMAIN_VALIDATION=1)");
static int mpt_width = MPTSCSIH_MAX_WIDTH;
module_param(mpt_width, int, 0);
MODULE_PARM_DESC(mpt_width, " Max Bus Width: wide=1, narrow=0 (default=MPTSCSIH_MAX_WIDTH=1)");
static ushort mpt_factor = MPTSCSIH_MIN_SYNC;
module_param(mpt_factor, ushort, 0);
MODULE_PARM_DESC(mpt_factor, " Min Sync Factor (default=MPTSCSIH_MIN_SYNC=0x08)");
#endif
static int mpt_saf_te = MPTSCSIH_SAF_TE;
module_param(mpt_saf_te, int, 0);
MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)");
......@@ -98,10 +87,308 @@ static int mpt_pq_filter = 0;
module_param(mpt_pq_filter, int, 0);
MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)");
static void mptspi_write_offset(struct scsi_target *, int);
static void mptspi_write_width(struct scsi_target *, int);
static int mptspi_write_spi_device_pg1(struct scsi_target *,
struct _CONFIG_PAGE_SCSI_DEVICE_1 *);
static struct scsi_transport_template *mptspi_transport_template = NULL;
static int mptspiDoneCtx = -1;
static int mptspiTaskCtx = -1;
static int mptspiInternalCtx = -1; /* Used only for internal commands */
static int mptspi_target_alloc(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)shost->hostdata;
int ret;
if (hd == NULL)
return -ENODEV;
ret = mptscsih_target_alloc(starget);
if (ret)
return ret;
/* if we're a device on virtual channel 1 and we're not part
* of an array, just return here (otherwise the setup below
* may actually affect a real physical device on channel 0) */
if (starget->channel == 1 &&
mptscsih_raid_id_to_num(hd, starget->id) < 0)
return 0;
if (hd->ioc->spi_data.nvram &&
hd->ioc->spi_data.nvram[starget->id] != MPT_HOST_NVRAM_INVALID) {
u32 nvram = hd->ioc->spi_data.nvram[starget->id];
spi_min_period(starget) = (nvram & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT;
spi_max_width(starget) = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1;
} else {
spi_min_period(starget) = hd->ioc->spi_data.minSyncFactor;
spi_max_width(starget) = hd->ioc->spi_data.maxBusWidth;
}
spi_max_offset(starget) = hd->ioc->spi_data.maxSyncOffset;
spi_offset(starget) = 0;
mptspi_write_width(starget, 0);
return 0;
}
static int mptspi_read_spi_device_pg0(struct scsi_target *starget,
struct _CONFIG_PAGE_SCSI_DEVICE_0 *pass_pg0)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)shost->hostdata;
struct _MPT_ADAPTER *ioc = hd->ioc;
struct _CONFIG_PAGE_SCSI_DEVICE_0 *pg0;
dma_addr_t pg0_dma;
int size;
struct _x_config_parms cfg;
struct _CONFIG_PAGE_HEADER hdr;
int err = -EBUSY;
/* No SPI parameters for RAID devices */
if (starget->channel == 0 &&
(hd->ioc->raid_data.isRaid & (1 << starget->id)))
return -1;
size = ioc->spi_data.sdp0length * 4;
/*
if (ioc->spi_data.sdp0length & 1)
size += size + 4;
size += 2048;
*/
pg0 = dma_alloc_coherent(&ioc->pcidev->dev, size, &pg0_dma, GFP_KERNEL);
if (pg0 == NULL) {
starget_printk(KERN_ERR, starget, "dma_alloc_coherent for parameters failed\n");
return -EINVAL;
}
memset(&hdr, 0, sizeof(hdr));
hdr.PageVersion = ioc->spi_data.sdp0version;
hdr.PageLength = ioc->spi_data.sdp0length;
hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
memset(&cfg, 0, sizeof(cfg));
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = pg0_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.dir = 0;
cfg.pageAddr = starget->id;
if (mpt_config(ioc, &cfg)) {
starget_printk(KERN_ERR, starget, "mpt_config failed\n");
goto out_free;
}
err = 0;
memcpy(pass_pg0, pg0, size);
out_free:
dma_free_coherent(&ioc->pcidev->dev, size, pg0, pg0_dma);
return err;
}
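/* mptspi_getRP - build the SCSI Device Page 1 RequestedParameters
 * word from the SPI transport class settings of @starget.
 */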
static u32 mptspi_getRP(struct scsi_target *starget)
{
u32 nego = 0;
nego |= spi_iu(starget) ? MPI_SCSIDEVPAGE1_RP_IU : 0;
nego |= spi_dt(starget) ? MPI_SCSIDEVPAGE1_RP_DT : 0;
nego |= spi_qas(starget) ? MPI_SCSIDEVPAGE1_RP_QAS : 0;
nego |= spi_hold_mcs(starget) ? MPI_SCSIDEVPAGE1_RP_HOLD_MCS : 0;
nego |= spi_wr_flow(starget) ? MPI_SCSIDEVPAGE1_RP_WR_FLOW : 0;
nego |= spi_rd_strm(starget) ? MPI_SCSIDEVPAGE1_RP_RD_STRM : 0;
nego |= spi_rti(starget) ? MPI_SCSIDEVPAGE1_RP_RTI : 0;
nego |= spi_pcomp_en(starget) ? MPI_SCSIDEVPAGE1_RP_PCOMP_EN : 0;
nego |= (spi_period(starget) << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD) & MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
nego |= (spi_offset(starget) << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET) & MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
nego |= spi_width(starget) ? MPI_SCSIDEVPAGE1_RP_WIDE : 0;
return nego;
}
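/* mptspi_read_parameters - refresh the SPI transport class settings
 * of @starget from the NegotiatedParameters in SCSI Device Page 0.
 */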
static void mptspi_read_parameters(struct scsi_target *starget)
{
int nego;
struct _CONFIG_PAGE_SCSI_DEVICE_0 pg0;
mptspi_read_spi_device_pg0(starget, &pg0);
nego = le32_to_cpu(pg0.NegotiatedParameters);
spi_iu(starget) = (nego & MPI_SCSIDEVPAGE0_NP_IU) ? 1 : 0;
spi_dt(starget) = (nego & MPI_SCSIDEVPAGE0_NP_DT) ? 1 : 0;
spi_qas(starget) = (nego & MPI_SCSIDEVPAGE0_NP_QAS) ? 1 : 0;
spi_wr_flow(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WR_FLOW) ? 1 : 0;
spi_rd_strm(starget) = (nego & MPI_SCSIDEVPAGE0_NP_RD_STRM) ? 1 : 0;
spi_rti(starget) = (nego & MPI_SCSIDEVPAGE0_NP_RTI) ? 1 : 0;
spi_pcomp_en(starget) = (nego & MPI_SCSIDEVPAGE0_NP_PCOMP_EN) ? 1 : 0;
spi_hold_mcs(starget) = (nego & MPI_SCSIDEVPAGE0_NP_HOLD_MCS) ? 1 : 0;
spi_period(starget) = (nego & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
spi_offset(starget) = (nego & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >> MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0;
}
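/* mptscsih_quiesce_raid - issue a RAID action to quiesce (quiesce != 0)
 * or re-enable physical I/O for @disk, so that DV traffic to a hidden
 * RAID component does not race with volume I/O.  Returns 0 on success,
 * negative on failure or timeout.
 */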
static int
mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, int disk)
{
MpiRaidActionRequest_t *pReq;
MPT_FRAME_HDR *mf;
/* Get and Populate a free Frame
*/
if ((mf = mpt_get_msg_frame(hd->ioc->InternalCtx, hd->ioc)) == NULL) {
ddvprintk((MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n",
hd->ioc->name));
return -EAGAIN;
}
pReq = (MpiRaidActionRequest_t *)mf;
if (quiesce)
pReq->Action = MPI_RAID_ACTION_QUIESCE_PHYS_IO;
else
pReq->Action = MPI_RAID_ACTION_ENABLE_PHYS_IO;
pReq->Reserved1 = 0;
pReq->ChainOffset = 0;
pReq->Function = MPI_FUNCTION_RAID_ACTION;
pReq->VolumeID = disk;
pReq->VolumeBus = 0;
pReq->PhysDiskNum = 0;
pReq->MsgFlags = 0;
pReq->Reserved2 = 0;
pReq->ActionDataWord = 0; /* Reserved for this action */
mpt_add_sge((char *)&pReq->ActionDataSGE,
MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
ddvprintk((MYIOC_s_INFO_FMT "RAID Volume action %x id %d\n",
hd->ioc->name, action, io->id));
hd->pLocal = NULL;
hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
hd->scandv_wait_done = 0;
/* Save cmd pointer, for resource free if timeout or
* FW reload occurs
*/
hd->cmdPtr = mf;
add_timer(&hd->timer);
mpt_put_msg_frame(hd->ioc->InternalCtx, hd->ioc, mf);
wait_event(hd->scandv_waitq, hd->scandv_wait_done);
if ((hd->pLocal == NULL) || (hd->pLocal->completion != 0))
return -1;
return 0;
}
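/* mptspi_dv_device - run transport class domain validation on @sdev.
 * RAID volumes (channel 0 with the isRaid bit set) are skipped; RAID
 * components on virtual channel 1 are quiesced for the duration of DV
 * and then resumed.
 */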
static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
struct scsi_device *sdev)
{
VirtTarget *vtarget = scsi_target(sdev)->hostdata;
/* no DV on RAID devices */
if (sdev->channel == 0 &&
(hd->ioc->raid_data.isRaid & (1 << sdev->id)))
return;
/* If this is a piece of a RAID, then quiesce first */
if (sdev->channel == 1 &&
mptscsih_quiesce_raid(hd, 1, vtarget->target_id) < 0) {
starget_printk(KERN_ERR, scsi_target(sdev),
"Integrated RAID quiesce failed\n");
return;
}
spi_dv_device(sdev);
if (sdev->channel == 1 &&
mptscsih_quiesce_raid(hd, 0, vtarget->target_id) < 0)
starget_printk(KERN_ERR, scsi_target(sdev),
"Integrated RAID resume failed\n");
mptspi_read_parameters(sdev->sdev_target);
spi_display_xfer_agreement(sdev->sdev_target);
mptspi_read_parameters(sdev->sdev_target);
}
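/* mptspi_slave_alloc - for devices on virtual channel 1, map the bus id
 * back to the RAID PhysDiskNum, mark the device as a hidden RAID
 * component (no_uld_attach) and point it at real channel 0.
 */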
static int mptspi_slave_alloc(struct scsi_device *sdev)
{
int ret;
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)sdev->host->hostdata;
/* gcc doesn't see that all uses of this variable occur within
* the if() statements, so stop it from whining */
int physdisknum = 0;
if (sdev->channel == 1) {
physdisknum = mptscsih_raid_id_to_num(hd, sdev->id);
if (physdisknum < 0)
return physdisknum;
}
ret = mptscsih_slave_alloc(sdev);
if (ret)
return ret;
if (sdev->channel == 1) {
VirtDevice *vdev = sdev->hostdata;
sdev->no_uld_attach = 1;
vdev->vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
/* The real channel for this device is zero */
vdev->bus_id = 0;
/* The actual physdisknum (for RAID passthrough) */
vdev->target_id = physdisknum;
}
return 0;
}
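/* mptspi_slave_configure - kick off an initial round of domain
 * validation for anything that is not a RAID volume and has not been
 * validated yet.
 */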
static int mptspi_slave_configure(struct scsi_device *sdev)
{
int ret = mptscsih_slave_configure(sdev);
struct _MPT_SCSI_HOST *hd =
(struct _MPT_SCSI_HOST *)sdev->host->hostdata;
if (ret)
return ret;
if ((sdev->channel == 1 ||
!(hd->ioc->raid_data.isRaid & (1 << sdev->id))) &&
!spi_initial_dv(sdev->sdev_target))
mptspi_dv_device(hd, sdev);
return 0;
}
static void mptspi_slave_destroy(struct scsi_device *sdev)
{
struct scsi_target *starget = scsi_target(sdev);
VirtTarget *vtarget = starget->hostdata;
VirtDevice *vdevice = sdev->hostdata;
/* Will this be the last lun on a non-raid device? */
if (vtarget->num_luns == 1 && vdevice->configured_lun) {
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
/* Async Narrow */
pg1.RequestedParameters = 0;
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
mptscsih_slave_destroy(sdev);
}
static struct scsi_host_template mptspi_driver_template = {
.module = THIS_MODULE,
.proc_name = "mptspi",
......@@ -109,11 +396,11 @@ static struct scsi_host_template mptspi_driver_template = {
.name = "MPT SPI Host",
.info = mptscsih_info,
.queuecommand = mptscsih_qcmd,
.target_alloc = mptscsih_target_alloc,
.slave_alloc = mptscsih_slave_alloc,
.slave_configure = mptscsih_slave_configure,
.target_alloc = mptspi_target_alloc,
.slave_alloc = mptspi_slave_alloc,
.slave_configure = mptspi_slave_configure,
.target_destroy = mptscsih_target_destroy,
.slave_destroy = mptscsih_slave_destroy,
.slave_destroy = mptspi_slave_destroy,
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
.eh_device_reset_handler = mptscsih_dev_reset,
......@@ -128,6 +415,360 @@ static struct scsi_host_template mptspi_driver_template = {
.use_clustering = ENABLE_CLUSTERING,
};
static int mptspi_write_spi_device_pg1(struct scsi_target *starget,
struct _CONFIG_PAGE_SCSI_DEVICE_1 *pass_pg1)
{
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)shost->hostdata;
struct _MPT_ADAPTER *ioc = hd->ioc;
struct _CONFIG_PAGE_SCSI_DEVICE_1 *pg1;
dma_addr_t pg1_dma;
int size;
struct _x_config_parms cfg;
struct _CONFIG_PAGE_HEADER hdr;
int err = -EBUSY;
/* don't allow updating nego parameters on RAID devices */
if (starget->channel == 0 &&
(hd->ioc->raid_data.isRaid & (1 << starget->id)))
return -1;
size = ioc->spi_data.sdp1length * 4;
pg1 = dma_alloc_coherent(&ioc->pcidev->dev, size, &pg1_dma, GFP_KERNEL);
if (pg1 == NULL) {
starget_printk(KERN_ERR, starget, "dma_alloc_coherent for parameters failed\n");
return -EINVAL;
}
memset(&hdr, 0, sizeof(hdr));
hdr.PageVersion = ioc->spi_data.sdp1version;
hdr.PageLength = ioc->spi_data.sdp1length;
hdr.PageNumber = 1;
hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
memset(&cfg, 0, sizeof(cfg));
cfg.cfghdr.hdr = &hdr;
cfg.physAddr = pg1_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1;
cfg.pageAddr = starget->id;
memcpy(pg1, pass_pg1, size);
pg1->Header.PageVersion = hdr.PageVersion;
pg1->Header.PageLength = hdr.PageLength;
pg1->Header.PageNumber = hdr.PageNumber;
pg1->Header.PageType = hdr.PageType;
if (mpt_config(ioc, &cfg)) {
starget_printk(KERN_ERR, starget, "mpt_config failed\n");
goto out_free;
}
err = 0;
out_free:
dma_free_coherent(&ioc->pcidev->dev, size, pg1, pg1_dma);
return err;
}
static void mptspi_write_offset(struct scsi_target *starget, int offset)
{
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
u32 nego;
if (offset < 0)
offset = 0;
if (offset > 255)
offset = 255;
if (spi_offset(starget) == -1)
mptspi_read_parameters(starget);
spi_offset(starget) = offset;
nego = mptspi_getRP(starget);
pg1.RequestedParameters = cpu_to_le32(nego);
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
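/* mptspi_write_period - set the sync period factor.  A factor of 8
 * (Ultra320) requires both DT and IU, and a factor of 9 (Ultra160)
 * requires DT, so those bits are forced on before Page 1 is written.
 */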
static void mptspi_write_period(struct scsi_target *starget, int period)
{
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
u32 nego;
if (period < 8)
period = 8;
if (period > 255)
period = 255;
if (spi_period(starget) == -1)
mptspi_read_parameters(starget);
if (period == 8) {
spi_iu(starget) = 1;
spi_dt(starget) = 1;
} else if (period == 9) {
spi_dt(starget) = 1;
}
spi_period(starget) = period;
nego = mptspi_getRP(starget);
pg1.RequestedParameters = cpu_to_le32(nego);
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
static void mptspi_write_dt(struct scsi_target *starget, int dt)
{
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
u32 nego;
if (spi_period(starget) == -1)
mptspi_read_parameters(starget);
if (!dt && spi_period(starget) < 10)
spi_period(starget) = 10;
spi_dt(starget) = dt;
nego = mptspi_getRP(starget);
pg1.RequestedParameters = cpu_to_le32(nego);
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
static void mptspi_write_iu(struct scsi_target *starget, int iu)
{
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
u32 nego;
if (spi_period(starget) == -1)
mptspi_read_parameters(starget);
if (!iu && spi_period(starget) < 9)
spi_period(starget) = 9;
spi_iu(starget) = iu;
nego = mptspi_getRP(starget);
pg1.RequestedParameters = cpu_to_le32(nego);
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
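/* Generate the trivial setters (rd_strm, wr_flow, rti, hold_mcs,
 * pcomp_en): each stores the new value in the transport class and
 * rewrites SCSI Device Page 1 with the updated RequestedParameters.
 */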
#define MPTSPI_SIMPLE_TRANSPORT_PARM(parm) \
static void mptspi_write_##parm(struct scsi_target *starget, int parm)\
{ \
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1; \
u32 nego; \
\
spi_##parm(starget) = parm; \
\
nego = mptspi_getRP(starget); \
\
pg1.RequestedParameters = cpu_to_le32(nego); \
pg1.Reserved = 0; \
pg1.Configuration = 0; \
\
mptspi_write_spi_device_pg1(starget, &pg1); \
}
MPTSPI_SIMPLE_TRANSPORT_PARM(rd_strm)
MPTSPI_SIMPLE_TRANSPORT_PARM(wr_flow)
MPTSPI_SIMPLE_TRANSPORT_PARM(rti)
MPTSPI_SIMPLE_TRANSPORT_PARM(hold_mcs)
MPTSPI_SIMPLE_TRANSPORT_PARM(pcomp_en)
static void mptspi_write_qas(struct scsi_target *starget, int qas)
{
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)shost->hostdata;
VirtTarget *vtarget = starget->hostdata;
u32 nego;
if ((vtarget->negoFlags & MPT_TARGET_NO_NEGO_QAS) ||
hd->ioc->spi_data.noQas)
spi_qas(starget) = 0;
else
spi_qas(starget) = qas;
nego = mptspi_getRP(starget);
pg1.RequestedParameters = cpu_to_le32(nego);
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
static void mptspi_write_width(struct scsi_target *starget, int width)
{
struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
u32 nego;
if (!width) {
spi_dt(starget) = 0;
if (spi_period(starget) < 10)
spi_period(starget) = 10;
}
spi_width(starget) = width;
nego = mptspi_getRP(starget);
pg1.RequestedParameters = cpu_to_le32(nego);
pg1.Reserved = 0;
pg1.Configuration = 0;
mptspi_write_spi_device_pg1(starget, &pg1);
}
struct work_queue_wrapper {
struct work_struct work;
struct _MPT_SCSI_HOST *hd;
int disk;
};
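/* mpt_work_wrapper - workqueue context for a RAID "DV needed" event:
 * re-read the IOC RAID configuration, run DV on the channel 1 component
 * whose PhysDiskNum matches @disk, then rescan the target so the new
 * disk is exposed.
 */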
static void mpt_work_wrapper(void *data)
{
struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
struct _MPT_SCSI_HOST *hd = wqw->hd;
struct Scsi_Host *shost = hd->ioc->sh;
struct scsi_device *sdev;
int disk = wqw->disk;
struct _CONFIG_PAGE_IOC_3 *pg3;
kfree(wqw);
mpt_findImVolumes(hd->ioc);
pg3 = hd->ioc->raid_data.pIocPg3;
if (!pg3)
return;
shost_for_each_device(sdev,shost) {
struct scsi_target *starget = scsi_target(sdev);
VirtTarget *vtarget = starget->hostdata;
/* only want to search RAID components */
if (sdev->channel != 1)
continue;
/* The target_id is the raid PhysDiskNum, even though
* starget->id is the actual target address */
if(vtarget->target_id != disk)
continue;
starget_printk(KERN_INFO, vtarget->starget,
"Integrated RAID requests DV of new device\n");
mptspi_dv_device(hd, sdev);
}
shost_printk(KERN_INFO, shost,
"Integrated RAID detects new device %d\n", disk);
scsi_scan_target(&hd->ioc->sh->shost_gendev, 1, disk, 0, 1);
}
static void mpt_dv_raid(struct _MPT_SCSI_HOST *hd, int disk)
{
struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC);
if (!wqw) {
shost_printk(KERN_ERR, hd->ioc->sh,
"Failed to act on RAID event for physical disk %d\n",
disk);
return;
}
INIT_WORK(&wqw->work, mpt_work_wrapper, wqw);
wqw->hd = hd;
wqw->disk = disk;
schedule_work(&wqw->work);
}
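/* mptspi_event_process - watch for Integrated RAID events that request
 * domain validation of a new or replaced physical disk, then pass the
 * event on to the generic mptscsih handler.
 */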
static int
mptspi_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)ioc->sh->hostdata;
if (hd && event == MPI_EVENT_INTEGRATED_RAID) {
int reason
= (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
int disk = (le32_to_cpu(pEvReply->Data[0]) & 0xFF000000) >> 24;
mpt_dv_raid(hd, disk);
}
}
return mptscsih_event_process(ioc, pEvReply);
}
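/* mptspi_deny_binding - keep the SPI transport class from binding to
 * RAID volumes on channel 0; parameter setting and DV are done on the
 * hidden components exported on virtual channel 1 instead.
 */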
static int
mptspi_deny_binding(struct scsi_target *starget)
{
struct _MPT_SCSI_HOST *hd =
(struct _MPT_SCSI_HOST *)dev_to_shost(starget->dev.parent)->hostdata;
return ((hd->ioc->raid_data.isRaid & (1 << starget->id)) &&
starget->channel == 0) ? 1 : 0;
}
static struct spi_function_template mptspi_transport_functions = {
.get_offset = mptspi_read_parameters,
.set_offset = mptspi_write_offset,
.show_offset = 1,
.get_period = mptspi_read_parameters,
.set_period = mptspi_write_period,
.show_period = 1,
.get_width = mptspi_read_parameters,
.set_width = mptspi_write_width,
.show_width = 1,
.get_iu = mptspi_read_parameters,
.set_iu = mptspi_write_iu,
.show_iu = 1,
.get_dt = mptspi_read_parameters,
.set_dt = mptspi_write_dt,
.show_dt = 1,
.get_qas = mptspi_read_parameters,
.set_qas = mptspi_write_qas,
.show_qas = 1,
.get_wr_flow = mptspi_read_parameters,
.set_wr_flow = mptspi_write_wr_flow,
.show_wr_flow = 1,
.get_rd_strm = mptspi_read_parameters,
.set_rd_strm = mptspi_write_rd_strm,
.show_rd_strm = 1,
.get_rti = mptspi_read_parameters,
.set_rti = mptspi_write_rti,
.show_rti = 1,
.get_pcomp_en = mptspi_read_parameters,
.set_pcomp_en = mptspi_write_pcomp_en,
.show_pcomp_en = 1,
.get_hold_mcs = mptspi_read_parameters,
.set_hold_mcs = mptspi_write_hold_mcs,
.show_hold_mcs = 1,
.deny_binding = mptspi_deny_binding,
};
/****************************************************************************
* Supported hardware
......@@ -242,6 +883,13 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sh->max_id = MPT_MAX_SCSI_DEVICES;
sh->max_lun = MPT_LAST_LUN + 1;
/*
* If RAID Firmware Detected, setup virtual channel
*/
if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
> MPI_FW_HEADER_PID_PROD_TARGET_SCSI)
sh->max_channel = 1;
else
sh->max_channel = 0;
sh->this_id = ioc->pfacts[0].PortSCSIID;
......@@ -301,7 +949,8 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* indicates a device exists.
* max_id = 1 + maximum id (hosts.h)
*/
hd->Targets = kcalloc(sh->max_id, sizeof(void *), GFP_ATOMIC);
hd->Targets = kcalloc(sh->max_id * (sh->max_channel + 1),
sizeof(void *), GFP_ATOMIC);
if (!hd->Targets) {
error = -ENOMEM;
goto out_mptspi_probe;
......@@ -334,49 +983,23 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->spi_data.Saf_Te = mpt_saf_te;
hd->mpt_pq_filter = mpt_pq_filter;
#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
if (ioc->spi_data.maxBusWidth > mpt_width)
ioc->spi_data.maxBusWidth = mpt_width;
if (ioc->spi_data.minSyncFactor < mpt_factor)
ioc->spi_data.minSyncFactor = mpt_factor;
if (ioc->spi_data.minSyncFactor == MPT_ASYNC) {
ioc->spi_data.maxSyncOffset = 0;
}
ioc->spi_data.mpt_dv = mpt_dv;
hd->negoNvram = 0;
ddvprintk((MYIOC_s_INFO_FMT
"dv %x width %x factor %x saf_te %x mpt_pq_filter %x\n",
ioc->name,
mpt_dv,
mpt_width,
mpt_factor,
mpt_saf_te,
mpt_pq_filter));
#else
hd->negoNvram = MPT_SCSICFG_USE_NVRAM;
ddvprintk((MYIOC_s_INFO_FMT
"saf_te %x mpt_pq_filter %x\n",
ioc->name,
mpt_saf_te,
mpt_pq_filter));
#endif
ioc->spi_data.forceDv = 0;
ioc->spi_data.noQas = 0;
for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++)
ioc->spi_data.dvStatus[ii] =
MPT_SCSICFG_NEGOTIATE;
for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++)
ioc->spi_data.dvStatus[ii] |=
MPT_SCSICFG_DV_NOT_DONE;
init_waitqueue_head(&hd->scandv_waitq);
hd->scandv_wait_done = 0;
hd->last_queue_full = 0;
/* Some versions of the firmware don't support page 0; without
* that we can't get the parameters */
if (hd->ioc->spi_data.sdp0length != 0)
sh->transportt = mptspi_transport_template;
error = scsi_add_host (sh, &ioc->pcidev->dev);
if(error) {
dprintk((KERN_ERR MYNAM
......@@ -423,14 +1046,17 @@ static struct pci_driver mptspi_driver = {
static int __init
mptspi_init(void)
{
show_mptmod_ver(my_NAME, my_VERSION);
mptspi_transport_template = spi_attach_transport(&mptspi_transport_functions);
if (!mptspi_transport_template)
return -ENODEV;
mptspiDoneCtx = mpt_register(mptscsih_io_done, MPTSPI_DRIVER);
mptspiTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSPI_DRIVER);
mptspiInternalCtx = mpt_register(mptscsih_scandv_complete, MPTSPI_DRIVER);
if (mpt_event_register(mptspiDoneCtx, mptscsih_event_process) == 0) {
if (mpt_event_register(mptspiDoneCtx, mptspi_event_process) == 0) {
devtprintk((KERN_INFO MYNAM
": Registered for IOC event notifications\n"));
}
......@@ -465,6 +1091,7 @@ mptspi_exit(void)
mpt_deregister(mptspiInternalCtx);
mpt_deregister(mptspiTaskCtx);
mpt_deregister(mptspiDoneCtx);
spi_release_transport(mptspi_transport_template);
}
module_init(mptspi_init);
......