Commit 211c8d49 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (59 commits)
  [SCSI] replace __FUNCTION__ with __func__
  [SCSI] extend the last_sector_bug flag to cover more sectors
  [SCSI] qla2xxx: Update version number to 8.02.01-k6.
  [SCSI] qla2xxx: Additional NPIV corrections.
  [SCSI] qla2xxx: suppress uninitialized-var warning
  [SCSI] qla2xxx: use memory_read_from_buffer()
  [SCSI] qla2xxx: Issue proper ISP callbacks during stop-firmware.
  [SCSI] ch: fix ch_remove oops
  [SCSI] 3w-9xxx: add MSI support and misc fixes
  [SCSI] scsi_lib: use blk_rq_tagged in scsi_request_fn
  [SCSI] ibmvfc: Update driver version to 1.0.1
  [SCSI] ibmvfc: Add ADISC support
  [SCSI] ibmvfc: Miscellaneous fixes
  [SCSI] ibmvfc: Fix hang on module removal
  [SCSI] ibmvfc: Target refcounting fixes
  [SCSI] ibmvfc: Reduce unnecessary log noise
  [SCSI] sym53c8xx: free luntbl in sym_hcb_free
  [SCSI] scsi_scan.c: Release mutex in error handling code
  [SCSI] scsi_eh_prep_cmnd should save scmd->underflow
  [SCSI] sd: Support for SCSI disk (SBC) Data Integrity Field
  ...
......@@ -147,9 +147,12 @@ static struct priority_group *alloc_priority_group(void)
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
struct pgpath *pgpath, *tmp;
struct multipath *m = ti->private;
list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
list_del(&pgpath->list);
if (m->hw_handler_name)
scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
dm_put_device(ti, pgpath->path.dev);
free_pgpath(pgpath);
}
......@@ -548,6 +551,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
{
int r;
struct pgpath *p;
struct multipath *m = ti->private;
/* we need at least a path arg */
if (as->argc < 1) {
......@@ -566,6 +570,15 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
goto bad;
}
if (m->hw_handler_name) {
r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
m->hw_handler_name);
if (r < 0) {
dm_put_device(ti, p->path.dev);
goto bad;
}
}
r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
if (r) {
dm_put_device(ti, p->path.dev);
......
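For illustration only, a minimal sketch (not the dm-mpath code itself) of the attach/detach pairing the hunks above introduce: attach the named hardware handler to a path's request queue while the path is parsed, detach it again when the path is freed. The helper names setup_path_handler() and teardown_path_handler() are hypothetical.

#include <linux/blkdev.h>
#include <scsi/scsi_dh.h>

static int setup_path_handler(struct request_queue *q, const char *hw_handler_name)
{
	int r = 0;

	if (hw_handler_name)
		r = scsi_dh_attach(q, hw_handler_name);	/* returns < 0 on failure */
	return r;
}

static void teardown_path_handler(struct request_queue *q, const char *hw_handler_name)
{
	if (hw_handler_name)
		scsi_dh_detach(q);	/* only detaches a manually attached handler */
}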
......@@ -273,12 +273,12 @@ mpt_fault_reset_work(struct work_struct *work)
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
ioc->name, __FUNCTION__);
ioc->name, __func__);
rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
__FUNCTION__, (rc == 0) ? "success" : "failed");
__func__, (rc == 0) ? "success" : "failed");
ioc_raw_state = mpt_GetIocState(ioc, 0);
if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
......@@ -356,7 +356,7 @@ mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
MptCallbacks[cb_idx] == NULL) {
printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
__FUNCTION__, ioc->name, cb_idx);
__func__, ioc->name, cb_idx);
goto out;
}
......@@ -420,7 +420,7 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa)
if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
MptCallbacks[cb_idx] == NULL) {
printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
__FUNCTION__, ioc->name, cb_idx);
__func__, ioc->name, cb_idx);
freeme = 0;
goto out;
}
......@@ -2434,7 +2434,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
if (ioc->cached_fw != NULL) {
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto "
"adapter\n", __FUNCTION__, ioc->name));
"adapter\n", __func__, ioc->name));
if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
ioc->cached_fw, CAN_SLEEP)) < 0) {
printk(MYIOC_s_WARN_FMT
......@@ -3693,7 +3693,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
"address=%p\n", ioc->name, __FUNCTION__,
"address=%p\n", ioc->name, __func__,
&ioc->chip->Doorbell, &ioc->chip->Reset_1078));
CHIPREG_WRITE32(&ioc->chip->Reset_1078, 0x07);
if (sleepFlag == CAN_SLEEP)
......@@ -4742,12 +4742,12 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
break;
}
printk("%s: persist_opcode=%x\n",__FUNCTION__, persist_opcode);
printk("%s: persist_opcode=%x\n",__func__, persist_opcode);
/* Get a MF for this command.
*/
if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
printk("%s: no msg frames!\n",__FUNCTION__);
printk("%s: no msg frames!\n",__func__);
return -1;
}
......@@ -4771,13 +4771,13 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
(SasIoUnitControlReply_t *)ioc->persist_reply_frame;
if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
__FUNCTION__,
__func__,
sasIoUnitCntrReply->IOCStatus,
sasIoUnitCntrReply->IOCLogInfo);
return -1;
}
printk("%s: success\n",__FUNCTION__);
printk("%s: success\n",__func__);
return 0;
}
......@@ -5784,7 +5784,7 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
ioc->name,__FUNCTION__));
ioc->name,__func__));
return -1;
}
......
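Most of these hunks implement the "[SCSI] replace __FUNCTION__ with __func__" change from the series above. As a small standalone illustration (not driver code): __func__ is the C99 predefined identifier naming the enclosing function, while __FUNCTION__ is the older GCC-specific spelling being removed.

#include <stdio.h>

/* ##__VA_ARGS__ is the GNU form, matching the "## arg" style of the kernel
 * debug macros touched above. */
#define DBG(fmt, ...) \
	printf("%s: " fmt "\n", __func__, ##__VA_ARGS__)

static void probe_device(int id)
{
	DBG("probing device %d", id);	/* prints "probe_device: probing device 7" */
}

int main(void)
{
	probe_device(7);
	return 0;
}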
......@@ -505,7 +505,7 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
event = le32_to_cpu(pEvReply->Event) & 0xFF;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s() called\n",
ioc->name, __FUNCTION__));
ioc->name, __func__));
if(async_queue == NULL)
return 1;
......@@ -2482,7 +2482,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
*/
if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
ioc->name,__FUNCTION__));
ioc->name,__func__));
goto out;
}
......
......@@ -231,28 +231,28 @@ static int
mptfc_abort(struct scsi_cmnd *SCpnt)
{
return
mptfc_block_error_handler(SCpnt, mptscsih_abort, __FUNCTION__);
mptfc_block_error_handler(SCpnt, mptscsih_abort, __func__);
}
static int
mptfc_dev_reset(struct scsi_cmnd *SCpnt)
{
return
mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __FUNCTION__);
mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __func__);
}
static int
mptfc_bus_reset(struct scsi_cmnd *SCpnt)
{
return
mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __FUNCTION__);
mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__);
}
static int
mptfc_host_reset(struct scsi_cmnd *SCpnt)
{
return
mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __FUNCTION__);
mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __func__);
}
static void
......
......@@ -610,7 +610,7 @@ mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__FUNCTION__, sent));
__func__, sent));
priv->SendCtl[ctx].skb = NULL;
pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
......@@ -676,7 +676,7 @@ mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__FUNCTION__, sent));
__func__, sent));
priv->SendCtl[ctx].skb = NULL;
pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
......@@ -715,7 +715,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
u16 cur_naa = 0x1000;
dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
__FUNCTION__, skb));
__func__, skb));
spin_lock_irqsave(&priv->txfidx_lock, flags);
if (priv->mpt_txfidx_tail < 0) {
......@@ -723,7 +723,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
printk (KERN_ERR "%s: no tx context available: %u\n",
__FUNCTION__, priv->mpt_txfidx_tail);
__func__, priv->mpt_txfidx_tail);
return 1;
}
......@@ -733,7 +733,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&priv->txfidx_lock, flags);
printk (KERN_ERR "%s: Unable to alloc request frame\n",
__FUNCTION__);
__func__);
return 1;
}
......@@ -1208,7 +1208,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
__FUNCTION__, buckets, curr));
__func__, buckets, curr));
max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
......@@ -1217,9 +1217,9 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
mf = mpt_get_msg_frame(LanCtx, mpt_dev);
if (mf == NULL) {
printk (KERN_ERR "%s: Unable to alloc request frame\n",
__FUNCTION__);
__func__);
dioprintk((KERN_ERR "%s: %u buckets remaining\n",
__FUNCTION__, buckets));
__func__, buckets));
goto out;
}
pRecvReq = (LANReceivePostRequest_t *) mf;
......@@ -1244,7 +1244,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
spin_lock_irqsave(&priv->rxfidx_lock, flags);
if (priv->mpt_rxfidx_tail < 0) {
printk (KERN_ERR "%s: Can't alloc context\n",
__FUNCTION__);
__func__);
spin_unlock_irqrestore(&priv->rxfidx_lock,
flags);
break;
......@@ -1267,7 +1267,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
if (skb == NULL) {
printk (KERN_WARNING
MYNAM "/%s: Can't alloc skb\n",
__FUNCTION__);
__func__);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
break;
......@@ -1305,7 +1305,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
if (pSimple == NULL) {
/**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/ __FUNCTION__);
/**/ __func__);
mpt_free_msg_frame(mpt_dev, mf);
goto out;
}
......@@ -1329,9 +1329,9 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
out:
dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
__FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
__func__, buckets, atomic_read(&priv->buckets_out)));
dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
__FUNCTION__, priv->total_posted, priv->total_received));
__func__, priv->total_posted, priv->total_received));
clear_bit(0, &priv->post_buckets_active);
}
......
......@@ -300,7 +300,7 @@ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_detai
phy_info = port_info->phy_info;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: [%p]: num_phys=%02d "
"bitmask=0x%016llX\n", ioc->name, __FUNCTION__, port_details,
"bitmask=0x%016llX\n", ioc->name, __func__, port_details,
port_details->num_phys, (unsigned long long)
port_details->phy_bitmask));
......@@ -411,7 +411,7 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
*/
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: [%p]: deleting phy = %d\n",
ioc->name, __FUNCTION__, port_details, i));
ioc->name, __func__, port_details, i));
port_details->num_phys--;
port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
......@@ -497,7 +497,7 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
continue;
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"%s: [%p]: phy_id=%02d num_phys=%02d "
"bitmask=0x%016llX\n", ioc->name, __FUNCTION__,
"bitmask=0x%016llX\n", ioc->name, __func__,
port_details, i, port_details->num_phys,
(unsigned long long)port_details->phy_bitmask));
dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tport = %p rphy=%p\n",
......@@ -553,7 +553,7 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n",
ioc->name,__FUNCTION__, __LINE__));
ioc->name,__func__, __LINE__));
return 0;
}
......@@ -606,7 +606,7 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
GFP_ATOMIC);
if (!target_reset_list) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
ioc->name,__FUNCTION__, __LINE__));
ioc->name,__func__, __LINE__));
return;
}
......@@ -673,7 +673,7 @@ mptsas_dev_reset_complete(MPT_ADAPTER *ioc)
ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
if (!ev) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
ioc->name,__FUNCTION__, __LINE__));
ioc->name,__func__, __LINE__));
return;
}
......@@ -1183,7 +1183,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
reply = (SasIoUnitControlReply_t *)ioc->sas_mgmt.reply;
if (reply->IOCStatus != MPI_IOCSTATUS_SUCCESS) {
printk(MYIOC_s_INFO_FMT "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
ioc->name, __FUNCTION__, reply->IOCStatus, reply->IOCLogInfo);
ioc->name, __func__, reply->IOCStatus, reply->IOCLogInfo);
error = -ENXIO;
goto out_unlock;
}
......@@ -1270,14 +1270,14 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
if (!rsp) {
printk(MYIOC_s_ERR_FMT "%s: the smp response space is missing\n",
ioc->name, __FUNCTION__);
ioc->name, __func__);
return -EINVAL;
}
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
ioc->name, __FUNCTION__, req->bio->bi_vcnt, req->data_len,
ioc->name, __func__, req->bio->bi_vcnt, req->data_len,
rsp->bio->bi_vcnt, rsp->data_len);
return -EINVAL;
}
......@@ -1343,7 +1343,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
if (!timeleft) {
printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __FUNCTION__);
printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __func__);
/* On timeout reset the board */
mpt_HardResetHandler(ioc, CAN_SLEEP);
ret = -ETIMEDOUT;
......@@ -1361,7 +1361,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
rsp->data_len -= smprep->ResponseDataLength;
} else {
printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
ioc->name, __FUNCTION__);
ioc->name, __func__);
ret = -ENXIO;
}
unmap:
......@@ -2006,7 +2006,7 @@ static int mptsas_probe_one_phy(struct device *dev,
if (error) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
goto out;
}
mptsas_set_port(ioc, phy_info, port);
......@@ -2076,7 +2076,7 @@ static int mptsas_probe_one_phy(struct device *dev,
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
goto out;
}
......@@ -2085,7 +2085,7 @@ static int mptsas_probe_one_phy(struct device *dev,
if (error) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
sas_rphy_free(rphy);
goto out;
}
......@@ -2613,7 +2613,7 @@ mptsas_hotplug_work(struct work_struct *work)
(ev->channel << 8) + ev->id)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
phy_info = mptsas_find_phyinfo_by_sas_address(
......@@ -2633,20 +2633,20 @@ mptsas_hotplug_work(struct work_struct *work)
if (!phy_info){
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
if (!phy_info->port_details) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
rphy = mptsas_get_rphy(phy_info);
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
......@@ -2654,7 +2654,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!port) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
......@@ -2665,7 +2665,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!vtarget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
......@@ -2720,7 +2720,7 @@ mptsas_hotplug_work(struct work_struct *work)
(ev->channel << 8) + ev->id)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
......@@ -2732,7 +2732,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!phy_info || !phy_info->port_details) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
......@@ -2744,7 +2744,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!vtarget) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
/*
......@@ -2767,7 +2767,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (mptsas_get_rphy(phy_info)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
if (ev->channel) printk("%d\n", __LINE__);
break;
}
......@@ -2776,7 +2776,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!port) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break;
}
memcpy(&phy_info->attached, &sas_device,
......@@ -2801,7 +2801,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (!rphy) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
break; /* non-fatal: an rphy can be added later */
}
......@@ -2809,7 +2809,7 @@ mptsas_hotplug_work(struct work_struct *work)
if (sas_rphy_add(rphy)) {
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
"%s: exit at line=%d\n", ioc->name,
__FUNCTION__, __LINE__));
__func__, __LINE__));
sas_rphy_free(rphy);
break;
}
......
......@@ -461,7 +461,7 @@ mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget,
if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: no msg frames!!\n",
ioc->name,__FUNCTION__));
ioc->name,__func__));
return;
}
......@@ -2187,7 +2187,7 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *m
(ioc->debug_level & MPT_DEBUG_TM ))
printk("%s: ha=%d [%d:%d:0] task_type=0x%02X "
"iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X "
"term_cmnds=%d\n", __FUNCTION__, ioc->id, pScsiTmReply->Bus,
"term_cmnds=%d\n", __func__, ioc->id, pScsiTmReply->Bus,
pScsiTmReply->TargetID, pScsiTmReq->TaskType,
le16_to_cpu(pScsiTmReply->IOCStatus),
le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode,
......
......@@ -4,7 +4,7 @@
Written By: Adam Radford <linuxraid@amcc.com>
Modifications By: Tom Couch <linuxraid@amcc.com>
Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
Copyright (C) 2004-2008 Applied Micro Circuits Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
......@@ -71,6 +71,10 @@
Add support for 9650SE controllers.
2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
2.26.02.010 - Add support for 9690SA controllers.
2.26.02.011 - Increase max AENs drained to 256.
Add MSI support and "use_msi" module parameter.
Fix bug in twa_get_param() on 4GB+.
Use pci_resource_len() for ioremap().
*/
#include <linux/module.h>
......@@ -95,7 +99,7 @@
#include "3w-9xxx.h"
/* Globals */
#define TW_DRIVER_VERSION "2.26.02.010"
#define TW_DRIVER_VERSION "2.26.02.011"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
......@@ -107,6 +111,10 @@ MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);
static int use_msi = 0;
module_param(use_msi, int, S_IRUGO);
MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
/* Function prototypes */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
......@@ -1038,7 +1046,6 @@ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
TW_Command_Full *full_command_packet;
TW_Command *command_packet;
TW_Param_Apache *param;
unsigned long param_value;
void *retval = NULL;
/* Setup the command packet */
......@@ -1057,9 +1064,8 @@ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
param->table_id = cpu_to_le16(table_id | 0x8000);
param->parameter_id = cpu_to_le16(parameter_id);
param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
param_value = tw_dev->generic_buffer_phys[request_id];
command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(param_value);
command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
/* Post the command packet to the board */
......@@ -2000,7 +2006,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
{
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
u32 mem_addr;
unsigned long mem_addr, mem_len;
int retval = -ENODEV;
retval = pci_enable_device(pdev);
......@@ -2045,13 +2051,16 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
goto out_free_device_extension;
}
if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
mem_addr = pci_resource_start(pdev, 1);
else
mem_len = pci_resource_len(pdev, 1);
} else {
mem_addr = pci_resource_start(pdev, 2);
mem_len = pci_resource_len(pdev, 2);
}
/* Save base address */
tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);
tw_dev->base_addr = ioremap(mem_addr, mem_len);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
goto out_release_mem_region;
......@@ -2086,7 +2095,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
pci_set_drvdata(pdev, host);
printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%x, IRQ: %d.\n",
printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
host->host_no, mem_addr, pdev->irq);
printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
host->host_no,
......@@ -2097,6 +2106,11 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
/* Try to enable MSI */
if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
!pci_enable_msi(pdev))
set_bit(TW_USING_MSI, &tw_dev->flags);
/* Now setup the interrupt handler */
retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
if (retval) {
......@@ -2120,6 +2134,8 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
return 0;
out_remove_host:
if (test_bit(TW_USING_MSI, &tw_dev->flags))
pci_disable_msi(pdev);
scsi_remove_host(host);
out_iounmap:
iounmap(tw_dev->base_addr);
......@@ -2151,6 +2167,10 @@ static void twa_remove(struct pci_dev *pdev)
/* Shutdown the card */
__twa_shutdown(tw_dev);
/* Disable MSI if enabled */
if (test_bit(TW_USING_MSI, &tw_dev->flags))
pci_disable_msi(pdev);
/* Free IO remapping */
iounmap(tw_dev->base_addr);
......
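A condensed sketch of the MSI pattern the 3w-9xxx hunks add, using the same pci_enable_msi()/pci_disable_msi() calls and TW_USING_MSI flag shown above; twa_setup_irq() and twa_teardown_irq() are hypothetical helper names, not functions in the driver.

/* Probe side: opt in via the use_msi module parameter, fall back to INTx. */
static int twa_setup_irq(struct pci_dev *pdev, TW_Device_Extension *tw_dev)
{
	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
	    !pci_enable_msi(pdev))
		set_bit(TW_USING_MSI, &tw_dev->flags);

	return request_irq(pdev->irq, twa_interrupt, IRQF_SHARED,
			   "3w-9xxx", tw_dev);
}

/* Remove/error side: free the IRQ, then undo MSI only if it was enabled. */
static void twa_teardown_irq(struct pci_dev *pdev, TW_Device_Extension *tw_dev)
{
	free_irq(pdev->irq, tw_dev);
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);
}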
......@@ -4,7 +4,7 @@
Written By: Adam Radford <linuxraid@amcc.com>
Modifications By: Tom Couch <linuxraid@amcc.com>
Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
Copyright (C) 2004-2008 Applied Micro Circuits Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
......@@ -319,8 +319,8 @@ static twa_message_type twa_error_table[] = {
/* Compatibility defines */
#define TW_9000_ARCH_ID 0x5
#define TW_CURRENT_DRIVER_SRL 30
#define TW_CURRENT_DRIVER_BUILD 80
#define TW_CURRENT_DRIVER_SRL 35
#define TW_CURRENT_DRIVER_BUILD 0
#define TW_CURRENT_DRIVER_BRANCH 0
/* Phase defines */
......@@ -352,8 +352,9 @@ static twa_message_type twa_error_table[] = {
#define TW_MAX_RESET_TRIES 2
#define TW_MAX_CMDS_PER_LUN 254
#define TW_MAX_RESPONSE_DRAIN 256
#define TW_MAX_AEN_DRAIN 40
#define TW_MAX_AEN_DRAIN 255
#define TW_IN_RESET 2
#define TW_USING_MSI 3
#define TW_IN_ATTENTION_LOOP 4
#define TW_MAX_SECTORS 256
#define TW_AEN_WAIT_TIME 1000
......
......@@ -63,6 +63,7 @@ comment "SCSI support type (disk, tape, CD-ROM)"
config BLK_DEV_SD
tristate "SCSI disk support"
depends on SCSI
select CRC_T10DIF
---help---
If you want to use SCSI hard disks, Fibre Channel disks,
Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
......
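The new "select CRC_T10DIF" pulls in the library CRC used by the sd Data Integrity Field support added in this series: each 512-byte sector carries a protection-information tuple whose guard tag is a T10 CRC16 of the data. A rough sketch of that check, assuming the crc_t10dif() helper from <linux/crc-t10dif.h>; the tuple layout here is schematic, not the sd_dif.c definition.

#include <linux/crc-t10dif.h>

/* Schematic 8-byte DIF tuple appended to each sector. */
struct dif_tuple_sketch {
	__be16 guard_tag;	/* CRC16 of the sector data */
	__be16 app_tag;
	__be32 ref_tag;		/* low 32 bits of the target sector number */
};

static int verify_sector_guard(const void *data, unsigned int len,
			       const struct dif_tuple_sketch *pi)
{
	__u16 crc = crc_t10dif(data, len);

	return crc == be16_to_cpu(pi->guard_tag) ? 0 : -EIO;
}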
......@@ -151,6 +151,8 @@ scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
sd_mod-objs := sd.o
sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
:= -DCONFIG_NCR53C8XX_PREFETCH -DSCSI_NCR_BIG_ENDIAN \
......
......@@ -2278,7 +2278,7 @@ do { \
#define ASC_DBG(lvl, format, arg...) { \
if (asc_dbglvl >= (lvl)) \
printk(KERN_DEBUG "%s: %s: " format, DRV_NAME, \
__FUNCTION__ , ## arg); \
__func__ , ## arg); \
}
#define ASC_DBG_PRT_SCSI_HOST(lvl, s) \
......
......@@ -288,20 +288,20 @@ static LIST_HEAD(aha152x_host_list);
#define DO_LOCK(flags) \
do { \
if(spin_is_locked(&QLOCK)) { \
DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \
DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \
} \
DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
spin_lock_irqsave(&QLOCK,flags); \
DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
QLOCKER=__FUNCTION__; \
DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
QLOCKER=__func__; \
QLOCKERL=__LINE__; \
} while(0)
#define DO_UNLOCK(flags) \
do { \
DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \
DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \
spin_unlock_irqrestore(&QLOCK,flags); \
DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
QLOCKER="(not locked)"; \
QLOCKERL=0; \
} while(0)
......
......@@ -39,9 +39,9 @@
#ifdef ASD_ENTER_EXIT
#define ENTER printk(KERN_NOTICE "%s: ENTER %s\n", ASD_DRIVER_NAME, \
__FUNCTION__)
__func__)
#define EXIT printk(KERN_NOTICE "%s: --EXIT %s\n", ASD_DRIVER_NAME, \
__FUNCTION__)
__func__)
#else
#define ENTER
#define EXIT
......
......@@ -1359,7 +1359,7 @@ int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask)
struct asd_ascb *ascb_list;
if (!phy_mask) {
asd_printk("%s called with phy_mask of 0!?\n", __FUNCTION__);
asd_printk("%s called with phy_mask of 0!?\n", __func__);
return 0;
}
......
......@@ -211,7 +211,7 @@ static void asd_form_port(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
phy->asd_port = port;
}
ASD_DPRINTK("%s: updating phy_mask 0x%x for phy%d\n",
__FUNCTION__, phy->asd_port->phy_mask, sas_phy->id);
__func__, phy->asd_port->phy_mask, sas_phy->id);
asd_update_port_links(asd_ha, phy);
spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags);
}
......@@ -294,7 +294,7 @@ static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num,
GFP_ATOMIC);
if (!cp) {
asd_printk("%s: out of memory\n", __FUNCTION__);
asd_printk("%s: out of memory\n", __func__);
goto out;
}
ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n",
......@@ -446,7 +446,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
struct domain_device *failed_dev = NULL;
ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
__FUNCTION__, dl->status_block[3]);
__func__, dl->status_block[3]);
/*
* Find the task that caused the abort and abort it first.
......@@ -474,7 +474,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
if (!failed_dev) {
ASD_DPRINTK("%s: Can't find task (tc=%d) to abort!\n",
__FUNCTION__, tc_abort);
__func__, tc_abort);
goto out;
}
......@@ -502,7 +502,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
conn_handle = *((u16*)(&dl->status_block[1]));
conn_handle = le16_to_cpu(conn_handle);
ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__,
ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __func__,
dl->status_block[3]);
/* Find the last pending task for the device... */
......@@ -522,7 +522,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
if (!last_dev_task) {
ASD_DPRINTK("%s: Device reset for idle device %d?\n",
__FUNCTION__, conn_handle);
__func__, conn_handle);
goto out;
}
......@@ -549,10 +549,10 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
goto out;
}
case SIGNAL_NCQ_ERROR:
ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __FUNCTION__);
ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __func__);
goto out;
case CLEAR_NCQ_ERROR:
ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __FUNCTION__);
ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __func__);
goto out;
}
......@@ -560,26 +560,26 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
switch (sb_opcode) {
case BYTES_DMAED:
ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __FUNCTION__, phy_id);
ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __func__, phy_id);
asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id);
break;
case PRIMITIVE_RECVD:
ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __FUNCTION__,
ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __func__,
phy_id);
asd_primitive_rcvd_tasklet(ascb, dl, phy_id);
break;
case PHY_EVENT:
ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __FUNCTION__, phy_id);
ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __func__, phy_id);
asd_phy_event_tasklet(ascb, dl);
break;
case LINK_RESET_ERROR:
ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __FUNCTION__,
ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __func__,
phy_id);
asd_link_reset_err_tasklet(ascb, dl, phy_id);
break;
case TIMER_EVENT:
ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n",
__FUNCTION__, phy_id);
__func__, phy_id);
asd_turn_led(asd_ha, phy_id, 0);
/* the device is gone */
sas_phy_disconnected(sas_phy);
......@@ -587,7 +587,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
break;
default:
ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__,
ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __func__,
phy_id, sb_opcode);
ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
edb, dl->opcode);
......@@ -654,7 +654,7 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
if (status != 0) {
ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n",
__FUNCTION__, phy_id, status);
__func__, phy_id, status);
goto out;
}
......@@ -663,7 +663,7 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id);
asd_turn_led(asd_ha, phy_id, 0);
asd_control_led(asd_ha, phy_id, 0);
ASD_DPRINTK("%s: disable phy%d\n", __FUNCTION__, phy_id);
ASD_DPRINTK("%s: disable phy%d\n", __func__, phy_id);
break;
case ENABLE_PHY:
......@@ -673,40 +673,40 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
get_lrate_mode(phy, oob_mode);
asd_turn_led(asd_ha, phy_id, 1);
ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n",
__FUNCTION__, phy_id,phy->sas_phy.linkrate,
__func__, phy_id,phy->sas_phy.linkrate,
phy->sas_phy.iproto);
} else if (oob_status & CURRENT_SPINUP_HOLD) {
asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
asd_turn_led(asd_ha, phy_id, 1);
ASD_DPRINTK("%s: phy%d, spinup hold\n", __FUNCTION__,
ASD_DPRINTK("%s: phy%d, spinup hold\n", __func__,
phy_id);
} else if (oob_status & CURRENT_ERR_MASK) {
asd_turn_led(asd_ha, phy_id, 0);
ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n",
__FUNCTION__, phy_id, oob_status);
__func__, phy_id, oob_status);
} else if (oob_status & (CURRENT_HOT_PLUG_CNCT
| CURRENT_DEVICE_PRESENT)) {
asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
asd_turn_led(asd_ha, phy_id, 1);
ASD_DPRINTK("%s: phy%d: hot plug or device present\n",
__FUNCTION__, phy_id);
__func__, phy_id);
} else {
asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
asd_turn_led(asd_ha, phy_id, 0);
ASD_DPRINTK("%s: phy%d: no device present: "
"oob_status:0x%x\n",
__FUNCTION__, phy_id, oob_status);
__func__, phy_id, oob_status);
}
break;
case RELEASE_SPINUP_HOLD:
case PHY_NO_OP:
case EXECUTE_HARD_RESET:
ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __FUNCTION__,
ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __func__,
phy_id, control_phy->sub_func);
/* XXX finish */
break;
default:
ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __FUNCTION__,
ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __func__,
phy_id, control_phy->sub_func);
break;
}
......
......@@ -320,7 +320,7 @@ static void asd_task_tasklet_complete(struct asd_ascb *ascb,
case TC_RESUME:
case TC_PARTIAL_SG_LIST:
default:
ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __FUNCTION__, opcode);
ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode);
break;
}
......
......@@ -75,12 +75,12 @@ static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
struct done_list_struct *dl)
{
struct tasklet_completion_status *tcs = ascb->uldd_task;
ASD_DPRINTK("%s: here\n", __FUNCTION__);
ASD_DPRINTK("%s: here\n", __func__);
if (!del_timer(&ascb->timer)) {
ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__);
ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
return;
}
ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode);
ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
tcs->dl_opcode = dl->opcode;
complete(ascb->completion);
asd_ascb_free(ascb);
......@@ -91,7 +91,7 @@ static void asd_clear_nexus_timedout(unsigned long data)
struct asd_ascb *ascb = (void *)data;
struct tasklet_completion_status *tcs = ascb->uldd_task;
ASD_DPRINTK("%s: here\n", __FUNCTION__);
ASD_DPRINTK("%s: here\n", __func__);
tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
complete(ascb->completion);
}
......@@ -103,7 +103,7 @@ static void asd_clear_nexus_timedout(unsigned long data)
DECLARE_COMPLETION_ONSTACK(completion); \
DECLARE_TCS(tcs); \
\
ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \
ASD_DPRINTK("%s: PRE\n", __func__); \
res = 1; \
ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
if (!ascb) \
......@@ -115,12 +115,12 @@ static void asd_clear_nexus_timedout(unsigned long data)
scb->header.opcode = CLEAR_NEXUS
#define CLEAR_NEXUS_POST \
ASD_DPRINTK("%s: POST\n", __FUNCTION__); \
ASD_DPRINTK("%s: POST\n", __func__); \
res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
asd_clear_nexus_timedout); \
if (res) \
goto out_err; \
ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \
ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
wait_for_completion(&completion); \
res = tcs.dl_opcode; \
if (res == TC_NO_ERROR) \
......@@ -417,7 +417,7 @@ int asd_abort_task(struct sas_task *task)
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
res = TMF_RESP_FUNC_COMPLETE;
ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
goto out_done;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
......@@ -481,7 +481,7 @@ int asd_abort_task(struct sas_task *task)
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
res = TMF_RESP_FUNC_COMPLETE;
ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
goto out_done;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
......
......@@ -240,7 +240,7 @@ static void __fas216_checkmagic(FAS216_Info *info, const char *func)
panic("scsi memory space corrupted in %s", func);
}
}
#define fas216_checkmagic(info) __fas216_checkmagic((info), __FUNCTION__)
#define fas216_checkmagic(info) __fas216_checkmagic((info), __func__)
#else
#define fas216_checkmagic(info)
#endif
......@@ -2658,7 +2658,7 @@ int fas216_eh_host_reset(struct scsi_cmnd *SCpnt)
fas216_checkmagic(info);
printk("scsi%d.%c: %s: resetting host\n",
info->host->host_no, '0' + SCpnt->device->id, __FUNCTION__);
info->host->host_no, '0' + SCpnt->device->id, __func__);
/*
* Reset the SCSI chip.
......
......@@ -930,6 +930,7 @@ static int ch_probe(struct device *dev)
if (init)
ch_init_elem(ch);
dev_set_drvdata(dev, ch);
sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
return 0;
......
......@@ -30,3 +30,11 @@ config SCSI_DH_EMC
depends on SCSI_DH
help
If you have a EMC CLARiiON select y. Otherwise, say N.
config SCSI_DH_ALUA
tristate "SPC-3 ALUA Device Handler (EXPERIMENTAL)"
depends on SCSI_DH && EXPERIMENTAL
help
SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
Access (ALUA).
......@@ -5,3 +5,4 @@ obj-$(CONFIG_SCSI_DH) += scsi_dh.o
obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o
obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o
obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o
obj-$(CONFIG_SCSI_DH_ALUA) += scsi_dh_alua.o
......@@ -24,8 +24,16 @@
#include <scsi/scsi_dh.h>
#include "../scsi_priv.h"
struct scsi_dh_devinfo_list {
struct list_head node;
char vendor[9];
char model[17];
struct scsi_device_handler *handler;
};
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(scsi_dh_list);
static LIST_HEAD(scsi_dh_dev_list);
static struct scsi_device_handler *get_device_handler(const char *name)
{
......@@ -33,7 +41,7 @@ static struct scsi_device_handler *get_device_handler(const char *name)
spin_lock(&list_lock);
list_for_each_entry(tmp, &scsi_dh_list, list) {
if (!strcmp(tmp->name, name)) {
if (!strncmp(tmp->name, name, strlen(tmp->name))) {
found = tmp;
break;
}
......@@ -42,11 +50,307 @@ static struct scsi_device_handler *get_device_handler(const char *name)
return found;
}
static struct scsi_device_handler *
scsi_dh_cache_lookup(struct scsi_device *sdev)
{
struct scsi_dh_devinfo_list *tmp;
struct scsi_device_handler *found_dh = NULL;
spin_lock(&list_lock);
list_for_each_entry(tmp, &scsi_dh_dev_list, node) {
if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) &&
!strncmp(sdev->model, tmp->model, strlen(tmp->model))) {
found_dh = tmp->handler;
break;
}
}
spin_unlock(&list_lock);
return found_dh;
}
static int scsi_dh_handler_lookup(struct scsi_device_handler *scsi_dh,
struct scsi_device *sdev)
{
int i, found = 0;
for(i = 0; scsi_dh->devlist[i].vendor; i++) {
if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor,
strlen(scsi_dh->devlist[i].vendor)) &&
!strncmp(sdev->model, scsi_dh->devlist[i].model,
strlen(scsi_dh->devlist[i].model))) {
found = 1;
break;
}
}
return found;
}
/*
* device_handler_match - Attach a device handler to a device
* @scsi_dh - The device handler to match against or NULL
* @sdev - SCSI device to be tested against @scsi_dh
*
* Tests @sdev against the device handler @scsi_dh or against
* all registered device_handler if @scsi_dh == NULL.
* Returns the found device handler or NULL if not found.
*/
static struct scsi_device_handler *
device_handler_match(struct scsi_device_handler *scsi_dh,
struct scsi_device *sdev)
{
struct scsi_device_handler *found_dh = NULL;
struct scsi_dh_devinfo_list *tmp;
found_dh = scsi_dh_cache_lookup(sdev);
if (found_dh)
return found_dh;
if (scsi_dh) {
if (scsi_dh_handler_lookup(scsi_dh, sdev))
found_dh = scsi_dh;
} else {
struct scsi_device_handler *tmp_dh;
spin_lock(&list_lock);
list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
if (scsi_dh_handler_lookup(tmp_dh, sdev))
found_dh = tmp_dh;
}
spin_unlock(&list_lock);
}
if (found_dh) { /* If device is found, add it to the cache */
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
if (tmp) {
strncpy(tmp->vendor, sdev->vendor, 8);
strncpy(tmp->model, sdev->model, 16);
tmp->vendor[8] = '\0';
tmp->model[16] = '\0';
tmp->handler = found_dh;
spin_lock(&list_lock);
list_add(&tmp->node, &scsi_dh_dev_list);
spin_unlock(&list_lock);
} else {
found_dh = NULL;
}
}
return found_dh;
}
/*
* scsi_dh_handler_attach - Attach a device handler to a device
* @sdev - SCSI device the device handler should attach to
* @scsi_dh - The device handler to attach
*/
static int scsi_dh_handler_attach(struct scsi_device *sdev,
struct scsi_device_handler *scsi_dh)
{
int err = 0;
if (sdev->scsi_dh_data) {
if (sdev->scsi_dh_data->scsi_dh != scsi_dh)
err = -EBUSY;
} else if (scsi_dh->attach)
err = scsi_dh->attach(sdev);
return err;
}
/*
* scsi_dh_handler_detach - Detach a device handler from a device
* @sdev - SCSI device the device handler should be detached from
* @scsi_dh - Device handler to be detached
*
* Detach from a device handler. If a device handler is specified,
* only detach if the currently attached handler matches @scsi_dh.
*/
static void scsi_dh_handler_detach(struct scsi_device *sdev,
struct scsi_device_handler *scsi_dh)
{
if (!sdev->scsi_dh_data)
return;
if (scsi_dh && scsi_dh != sdev->scsi_dh_data->scsi_dh)
return;
if (!scsi_dh)
scsi_dh = sdev->scsi_dh_data->scsi_dh;
if (scsi_dh && scsi_dh->detach)
scsi_dh->detach(sdev);
}
/*
* Functions for sysfs attribute 'dh_state'
*/
static ssize_t
store_dh_state(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct scsi_device_handler *scsi_dh;
int err = -EINVAL;
if (!sdev->scsi_dh_data) {
/*
* Attach to a device handler
*/
if (!(scsi_dh = get_device_handler(buf)))
return err;
err = scsi_dh_handler_attach(sdev, scsi_dh);
} else {
scsi_dh = sdev->scsi_dh_data->scsi_dh;
if (!strncmp(buf, "detach", 6)) {
/*
* Detach from a device handler
*/
scsi_dh_handler_detach(sdev, scsi_dh);
err = 0;
} else if (!strncmp(buf, "activate", 8)) {
/*
* Activate a device handler
*/
if (scsi_dh->activate)
err = scsi_dh->activate(sdev);
else
err = 0;
}
}
return err<0?err:count;
}
static ssize_t
show_dh_state(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
if (!sdev->scsi_dh_data)
return snprintf(buf, 20, "detached\n");
return snprintf(buf, 20, "%s\n", sdev->scsi_dh_data->scsi_dh->name);
}
static struct device_attribute scsi_dh_state_attr =
__ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state,
store_dh_state);
/*
* scsi_dh_sysfs_attr_add - Callback for scsi_init_dh
*/
static int scsi_dh_sysfs_attr_add(struct device *dev, void *data)
{
struct scsi_device *sdev;
int err;
if (!scsi_is_sdev_device(dev))
return 0;
sdev = to_scsi_device(dev);
err = device_create_file(&sdev->sdev_gendev,
&scsi_dh_state_attr);
return 0;
}
/*
* scsi_dh_sysfs_attr_remove - Callback for scsi_exit_dh
*/
static int scsi_dh_sysfs_attr_remove(struct device *dev, void *data)
{
struct scsi_device *sdev;
if (!scsi_is_sdev_device(dev))
return 0;
sdev = to_scsi_device(dev);
device_remove_file(&sdev->sdev_gendev,
&scsi_dh_state_attr);
return 0;
}
/*
* scsi_dh_notifier - notifier chain callback
*/
static int scsi_dh_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
struct scsi_device *sdev;
int err = 0;
struct scsi_device_handler *devinfo = NULL;
if (!scsi_is_sdev_device(dev))
return 0;
sdev = to_scsi_device(dev);
if (action == BUS_NOTIFY_ADD_DEVICE) {
devinfo = device_handler_match(NULL, sdev);
if (!devinfo)
goto out;
err = scsi_dh_handler_attach(sdev, devinfo);
if (!err)
err = device_create_file(dev, &scsi_dh_state_attr);
} else if (action == BUS_NOTIFY_DEL_DEVICE) {
device_remove_file(dev, &scsi_dh_state_attr);
scsi_dh_handler_detach(sdev, NULL);
}
out:
return err;
}
/*
* scsi_dh_notifier_add - Callback for scsi_register_device_handler
*/
static int scsi_dh_notifier_add(struct device *dev, void *data)
{
struct scsi_device_handler *scsi_dh = data;
struct scsi_device *sdev;
if (!scsi_is_sdev_device(dev))
return 0;
if (!get_device(dev))
return 0;
sdev = to_scsi_device(dev);
if (device_handler_match(scsi_dh, sdev))
scsi_dh_handler_attach(sdev, scsi_dh);
put_device(dev);
return 0;
}
/*
* scsi_dh_notifier_remove - Callback for scsi_unregister_device_handler
*/
static int scsi_dh_notifier_remove(struct device *dev, void *data)
{
struct scsi_device_handler *scsi_dh = data;
struct scsi_device *sdev;
if (!scsi_is_sdev_device(dev))
return 0;
if (!get_device(dev))
return 0;
sdev = to_scsi_device(dev);
scsi_dh_handler_detach(sdev, scsi_dh);
put_device(dev);
scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_ADD_DEVICE, dev);
return 0;
}
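For context, a skeleton of how a handler module plugs into this core. It is a sketch only: the field names mirror the struct scsi_device_handler usage visible in the hp_sw hunks further down, and my_dh_attach()/my_dh_detach()/my_dh_activate() are placeholder callbacks, not real handler code.

#include <linux/module.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dh.h>

/* A real handler allocates/frees sdev->scsi_dh_data in attach/detach and
 * issues its failover command from activate. */
static int my_dh_attach(struct scsi_device *sdev)   { return 0; }
static void my_dh_detach(struct scsi_device *sdev)  { }
static int my_dh_activate(struct scsi_device *sdev) { return SCSI_DH_OK; }

static const struct scsi_dh_devlist my_dh_devlist[] = {
	{"VENDOR", "MODEL"},	/* matched by prefix against sdev->vendor/model */
	{NULL, NULL},
};

static struct scsi_device_handler my_dh = {
	.name     = "my_dh",
	.module   = THIS_MODULE,
	.devlist  = my_dh_devlist,
	.attach   = my_dh_attach,
	.detach   = my_dh_detach,
	.activate = my_dh_activate,
};

static int __init my_dh_init(void)
{
	/* The core walks existing sdevs and attaches wherever devlist matches. */
	return scsi_register_device_handler(&my_dh);
}

static void __exit my_dh_exit(void)
{
	scsi_unregister_device_handler(&my_dh);
}

module_init(my_dh_init);
module_exit(my_dh_exit);
MODULE_LICENSE("GPL");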
......@@ -59,33 +363,19 @@ static int scsi_dh_notifier_add(struct device *dev, void *data)
*/
int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
{
int ret = -EBUSY;
struct scsi_device_handler *tmp;
if (get_device_handler(scsi_dh->name))
return -EBUSY;
tmp = get_device_handler(scsi_dh->name);
if (tmp)
goto done;
ret = bus_register_notifier(&scsi_bus_type, &scsi_dh->nb);
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
spin_lock(&list_lock);
list_add(&scsi_dh->list, &scsi_dh_list);
spin_unlock(&list_lock);
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
done:
return ret;
return SCSI_DH_OK;
}
EXPORT_SYMBOL_GPL(scsi_register_device_handler);
static int scsi_dh_notifier_remove(struct device *dev, void *data)
{
struct scsi_device_handler *scsi_dh = data;
scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_DEL_DEVICE, dev);
return 0;
}
/*
* scsi_unregister_device_handler - register a device handler personality
* module.
......@@ -95,23 +385,26 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
*/
int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
{
int ret = -ENODEV;
struct scsi_device_handler *tmp;
tmp = get_device_handler(scsi_dh->name);
if (!tmp)
goto done;
struct scsi_dh_devinfo_list *tmp, *pos;
ret = bus_unregister_notifier(&scsi_bus_type, &scsi_dh->nb);
if (!get_device_handler(scsi_dh->name))
return -ENODEV;
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
scsi_dh_notifier_remove);
scsi_dh_notifier_remove);
spin_lock(&list_lock);
list_del(&scsi_dh->list);
list_for_each_entry_safe(pos, tmp, &scsi_dh_dev_list, node) {
if (pos->handler == scsi_dh) {
list_del(&pos->node);
kfree(pos);
}
}
spin_unlock(&list_lock);
printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
done:
return ret;
return SCSI_DH_OK;
}
EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
......@@ -157,6 +450,97 @@ int scsi_dh_handler_exist(const char *name)
}
EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
/*
* scsi_dh_handler_attach - Attach device handler
* @sdev - sdev the handler should be attached to
* @name - name of the handler to attach
*/
int scsi_dh_attach(struct request_queue *q, const char *name)
{
unsigned long flags;
struct scsi_device *sdev;
struct scsi_device_handler *scsi_dh;
int err = 0;
scsi_dh = get_device_handler(name);
if (!scsi_dh)
return -EINVAL;
spin_lock_irqsave(q->queue_lock, flags);
sdev = q->queuedata;
if (!sdev || !get_device(&sdev->sdev_gendev))
err = -ENODEV;
spin_unlock_irqrestore(q->queue_lock, flags);
if (!err) {
err = scsi_dh_handler_attach(sdev, scsi_dh);
put_device(&sdev->sdev_gendev);
}
return err;
}
EXPORT_SYMBOL_GPL(scsi_dh_attach);
/*
* scsi_dh_handler_detach - Detach device handler
* @sdev - sdev the handler should be detached from
*
* This function will detach the device handler only
* if the sdev is not part of the internal list, ie
* if it has been attached manually.
*/
void scsi_dh_detach(struct request_queue *q)
{
unsigned long flags;
struct scsi_device *sdev;
struct scsi_device_handler *scsi_dh = NULL;
spin_lock_irqsave(q->queue_lock, flags);
sdev = q->queuedata;
if (!sdev || !get_device(&sdev->sdev_gendev))
sdev = NULL;
spin_unlock_irqrestore(q->queue_lock, flags);
if (!sdev)
return;
if (sdev->scsi_dh_data) {
/* if sdev is not on internal list, detach */
scsi_dh = sdev->scsi_dh_data->scsi_dh;
if (!device_handler_match(scsi_dh, sdev))
scsi_dh_handler_detach(sdev, scsi_dh);
}
put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL_GPL(scsi_dh_detach);
static struct notifier_block scsi_dh_nb = {
.notifier_call = scsi_dh_notifier
};
static int __init scsi_dh_init(void)
{
int r;
r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
if (!r)
bus_for_each_dev(&scsi_bus_type, NULL, NULL,
scsi_dh_sysfs_attr_add);
return r;
}
static void __exit scsi_dh_exit(void)
{
bus_for_each_dev(&scsi_bus_type, NULL, NULL,
scsi_dh_sysfs_attr_remove);
bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
}
module_init(scsi_dh_init);
module_exit(scsi_dh_exit);
MODULE_DESCRIPTION("SCSI device handler");
MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
MODULE_LICENSE("GPL");
(This file's diff has been collapsed.)
......@@ -4,6 +4,7 @@
*
* Copyright (C) 2006 Red Hat, Inc. All rights reserved.
* Copyright (C) 2006 Mike Christie
* Copyright (C) 2008 Hannes Reinecke <hare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -25,13 +26,18 @@
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#define HP_SW_NAME "hp_sw"
#define HP_SW_NAME "hp_sw"
#define HP_SW_TIMEOUT (60 * HZ)
#define HP_SW_RETRIES 3
#define HP_SW_TIMEOUT (60 * HZ)
#define HP_SW_RETRIES 3
#define HP_SW_PATH_UNINITIALIZED -1
#define HP_SW_PATH_ACTIVE 0
#define HP_SW_PATH_PASSIVE 1
struct hp_sw_dh_data {
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
int path_state;
int retries;
};
......@@ -42,51 +48,161 @@ static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
return ((struct hp_sw_dh_data *) scsi_dh_data->buf);
}
static int hp_sw_done(struct scsi_device *sdev)
/*
* tur_done - Handle TEST UNIT READY return status
* @sdev: sdev the command has been sent to
* @errors: blk error code
*
* Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path
*/
static int tur_done(struct scsi_device *sdev, unsigned char *sense)
{
struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
struct scsi_sense_hdr sshdr;
int rc;
sdev_printk(KERN_INFO, sdev, "hp_sw_done\n");
int ret;
rc = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
if (!rc)
ret = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
if (!ret) {
sdev_printk(KERN_WARNING, sdev,
"%s: sending tur failed, no sense available\n",
HP_SW_NAME);
ret = SCSI_DH_IO;
goto done;
}
switch (sshdr.sense_key) {
case UNIT_ATTENTION:
ret = SCSI_DH_IMM_RETRY;
break;
case NOT_READY:
if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
rc = SCSI_DH_RETRY;
h->retries++;
if ((sshdr.asc == 0x04) && (sshdr.ascq == 2)) {
/*
* LUN not ready - Initialization command required
*
* This is the passive path
*/
ret = SCSI_DH_DEV_OFFLINED;
break;
}
/* fall through */
/* Fallthrough */
default:
h->retries++;
rc = SCSI_DH_IMM_RETRY;
sdev_printk(KERN_WARNING, sdev,
"%s: sending tur failed, sense %x/%x/%x\n",
HP_SW_NAME, sshdr.sense_key, sshdr.asc,
sshdr.ascq);
break;
}
done:
if (rc == SCSI_DH_OK || rc == SCSI_DH_IO)
h->retries = 0;
else if (h->retries > HP_SW_RETRIES) {
h->retries = 0;
return ret;
}
/*
* hp_sw_tur - Send TEST UNIT READY
* @sdev: sdev command should be sent to
*
* Use the TEST UNIT READY command to determine
* the path state.
*/
static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
{
struct request *req;
int ret;
req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
if (!req)
return SCSI_DH_RES_TEMP_UNAVAIL;
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST;
req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
memset(req->cmd, 0, MAX_COMMAND_SIZE);
req->cmd[0] = TEST_UNIT_READY;
req->timeout = HP_SW_TIMEOUT;
req->sense = h->sense;
memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
req->sense_len = 0;
retry:
ret = blk_execute_rq(req->q, NULL, req, 1);
if (ret == -EIO) {
if (req->sense_len > 0) {
ret = tur_done(sdev, h->sense);
} else {
sdev_printk(KERN_WARNING, sdev,
"%s: sending tur failed with %x\n",
HP_SW_NAME, req->errors);
ret = SCSI_DH_IO;
}
} else {
h->path_state = HP_SW_PATH_ACTIVE;
ret = SCSI_DH_OK;
}
if (ret == SCSI_DH_IMM_RETRY)
goto retry;
if (ret == SCSI_DH_DEV_OFFLINED) {
h->path_state = HP_SW_PATH_PASSIVE;
ret = SCSI_DH_OK;
}
blk_put_request(req);
return ret;
}
/*
* start_done - Handle START STOP UNIT return status
* @sdev: sdev the command has been sent to
* @errors: blk error code
*/
static int start_done(struct scsi_device *sdev, unsigned char *sense)
{
struct scsi_sense_hdr sshdr;
int rc;
rc = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
if (!rc) {
sdev_printk(KERN_WARNING, sdev,
"%s: sending start_stop_unit failed, "
"no sense available\n",
HP_SW_NAME);
return SCSI_DH_IO;
}
switch (sshdr.sense_key) {
case NOT_READY:
if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
/*
* LUN not ready - manual intervention required
*
* Switch-over in progress, retry.
*/
rc = SCSI_DH_RETRY;
break;
}
/* fall through */
default:
sdev_printk(KERN_WARNING, sdev,
"%s: sending start_stop_unit failed, sense %x/%x/%x\n",
HP_SW_NAME, sshdr.sense_key, sshdr.asc,
sshdr.ascq);
rc = SCSI_DH_IO;
}
return rc;
}
static int hp_sw_activate(struct scsi_device *sdev)
/*
* hp_sw_start_stop - Send START STOP UNIT command
* @sdev: sdev command should be sent to
*
* Sending START STOP UNIT activates the SP.
*/
static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
{
struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
struct request *req;
int ret = SCSI_DH_RES_TEMP_UNAVAIL;
int ret, retry;
req = blk_get_request(sdev->request_queue, WRITE, GFP_ATOMIC);
req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
if (!req)
goto done;
sdev_printk(KERN_INFO, sdev, "sending START_STOP.");
return SCSI_DH_RES_TEMP_UNAVAIL;
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST;
......@@ -98,95 +214,153 @@ static int hp_sw_activate(struct scsi_device *sdev)
req->sense = h->sense;
memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
req->sense_len = 0;
retry = h->retries;
retry:
ret = blk_execute_rq(req->q, NULL, req, 1);
if (!ret) /* SUCCESS */
ret = hp_sw_done(sdev);
else
if (ret == -EIO) {
if (req->sense_len > 0) {
ret = start_done(sdev, h->sense);
} else {
sdev_printk(KERN_WARNING, sdev,
"%s: sending start_stop_unit failed with %x\n",
HP_SW_NAME, req->errors);
ret = SCSI_DH_IO;
}
} else
ret = SCSI_DH_OK;
if (ret == SCSI_DH_RETRY) {
if (--retry)
goto retry;
ret = SCSI_DH_IO;
done:
}
blk_put_request(req);
return ret;
}
static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
{
struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
int ret = BLKPREP_OK;
if (h->path_state != HP_SW_PATH_ACTIVE) {
ret = BLKPREP_KILL;
req->cmd_flags |= REQ_QUIET;
}
return ret;
}
/*
* hp_sw_activate - Activate a path
* @sdev: sdev on the path to be activated
*
* The HP Active/Passive firmware is pretty simple;
* the passive path reports NOT READY with sense codes
* 0x04/0x02; a START STOP UNIT command will then
* activate the passive path (and deactivate the
* previously active one).
*/
static int hp_sw_activate(struct scsi_device *sdev)
{
int ret = SCSI_DH_OK;
struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
ret = hp_sw_tur(sdev, h);
if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) {
ret = hp_sw_start_stop(sdev, h);
if (ret == SCSI_DH_OK)
sdev_printk(KERN_INFO, sdev,
"%s: activated path\n",
HP_SW_NAME);
}
return ret;
}
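Purely as an illustration of the sense handling spread across tur_done() and start_done() above (classify_not_ready() is a hypothetical helper, not part of the driver): under NOT READY with ASC 0x04, the ASCQ distinguishes the passive path from a switch-over in progress.

static int classify_not_ready(u8 asc, u8 ascq)
{
	if (asc != 0x04)
		return SCSI_DH_IO;
	if (ascq == 0x02)	/* initialization required: this is the passive path */
		return SCSI_DH_DEV_OFFLINED;
	if (ascq == 0x03)	/* manual intervention: switch-over running, retry */
		return SCSI_DH_RETRY;
	return SCSI_DH_IO;
}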
static const struct {
char *vendor;
char *model;
} hp_sw_dh_data_list[] = {
{"COMPAQ", "MSA"},
{"HP", "HSV"},
const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
{"COMPAQ", "MSA1000 VOLUME"},
{"COMPAQ", "HSV110"},
{"HP", "HSV100"},
{"DEC", "HSG80"},
{NULL, NULL},
};
static int hp_sw_bus_notify(struct notifier_block *, unsigned long, void *);
static int hp_sw_bus_attach(struct scsi_device *sdev);
static void hp_sw_bus_detach(struct scsi_device *sdev);
static struct scsi_device_handler hp_sw_dh = {
.name = HP_SW_NAME,
.module = THIS_MODULE,
.nb.notifier_call = hp_sw_bus_notify,
.devlist = hp_sw_dh_data_list,
.attach = hp_sw_bus_attach,
.detach = hp_sw_bus_detach,
.activate = hp_sw_activate,
.prep_fn = hp_sw_prep_fn,
};
static int hp_sw_bus_notify(struct notifier_block *nb,
unsigned long action, void *data)
static int hp_sw_bus_attach(struct scsi_device *sdev)
{
struct device *dev = data;
struct scsi_device *sdev;
struct scsi_dh_data *scsi_dh_data;
int i, found = 0;
struct hp_sw_dh_data *h;
unsigned long flags;
int ret;
if (!scsi_is_sdev_device(dev))
scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n",
HP_SW_NAME);
return 0;
}
sdev = to_scsi_device(dev);
if (action == BUS_NOTIFY_ADD_DEVICE) {
for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
strlen(hp_sw_dh_data_list[i].vendor)) &&
!strncmp(sdev->model, hp_sw_dh_data_list[i].model,
strlen(hp_sw_dh_data_list[i].model))) {
found = 1;
break;
}
}
if (!found)
goto out;
scsi_dh_data->scsi_dh = &hp_sw_dh;
h = (struct hp_sw_dh_data *) scsi_dh_data->buf;
h->path_state = HP_SW_PATH_UNINITIALIZED;
h->retries = HP_SW_RETRIES;
scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "Attach Failed %s.\n",
HP_SW_NAME);
goto out;
}
ret = hp_sw_tur(sdev, h);
if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED)
goto failed;
scsi_dh_data->scsi_dh = &hp_sw_dh;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
sdev->scsi_dh_data = scsi_dh_data;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
try_module_get(THIS_MODULE);
if (!try_module_get(THIS_MODULE))
goto failed;
sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", HP_SW_NAME);
} else if (action == BUS_NOTIFY_DEL_DEVICE) {
if (sdev->scsi_dh_data == NULL ||
sdev->scsi_dh_data->scsi_dh != &hp_sw_dh)
goto out;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
sdev->scsi_dh_data = scsi_dh_data;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
scsi_dh_data = sdev->scsi_dh_data;
sdev->scsi_dh_data = NULL;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
module_put(THIS_MODULE);
sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n",
HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
"active":"passive");
sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", HP_SW_NAME);
return 0;
kfree(scsi_dh_data);
}
failed:
kfree(scsi_dh_data);
sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
HP_SW_NAME);
return -EINVAL;
}
out:
return 0;
static void hp_sw_bus_detach( struct scsi_device *sdev )
{
struct scsi_dh_data *scsi_dh_data;
unsigned long flags;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
scsi_dh_data = sdev->scsi_dh_data;
sdev->scsi_dh_data = NULL;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
module_put(THIS_MODULE);
sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", HP_SW_NAME);
kfree(scsi_dh_data);
}
static int __init hp_sw_init(void)
......@@ -202,6 +376,6 @@ static void __exit hp_sw_exit(void)
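
The bodies of hp_sw_init() and hp_sw_exit() are collapsed in this view; assuming the standard scsi_dh registration interface introduced by this series, they most likely reduce to something like the following sketch (not a verbatim quote of the patch):

static int __init hp_sw_init(void)
{
	/* register the handler with the scsi_dh framework */
	return scsi_register_device_handler(&hp_sw_dh);
}

static void __exit hp_sw_exit(void)
{
	scsi_unregister_device_handler(&hp_sw_dh);
}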
module_init(hp_sw_init);
module_exit(hp_sw_exit);
MODULE_DESCRIPTION("HP MSA 1000");
MODULE_DESCRIPTION("HP Active/Passive driver");
MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu");
MODULE_LICENSE("GPL");
......@@ -173,6 +173,11 @@ struct rdac_dh_data {
#define RDAC_STATE_ACTIVE 0
#define RDAC_STATE_PASSIVE 1
unsigned char state;
#define RDAC_LUN_UNOWNED 0
#define RDAC_LUN_OWNED 1
#define RDAC_LUN_AVT 2
char lun_state;
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
union {
struct c2_inquiry c2;
......@@ -182,6 +187,13 @@ struct rdac_dh_data {
} inq;
};
static const char *lun_state[] =
{
"unowned",
"owned",
"owned (AVT mode)",
};
static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
......@@ -197,9 +209,8 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
{
struct request *rq;
struct request_queue *q = sdev->request_queue;
struct rdac_dh_data *h = get_rdac_data(sdev);
rq = blk_get_request(q, rw, GFP_KERNEL);
rq = blk_get_request(q, rw, GFP_NOIO);
if (!rq) {
sdev_printk(KERN_INFO, sdev,
......@@ -207,17 +218,14 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
return NULL;
}
if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
blk_put_request(rq);
sdev_printk(KERN_INFO, sdev,
"get_rdac_req: blk_rq_map_kern failed.\n");
return NULL;
}
memset(&rq->cmd, 0, BLK_MAX_CDB);
rq->sense = h->sense;
memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
rq->sense_len = 0;
memset(rq->cmd, 0, BLK_MAX_CDB);
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
......@@ -227,12 +235,12 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
return rq;
}
static struct request *rdac_failover_get(struct scsi_device *sdev)
static struct request *rdac_failover_get(struct scsi_device *sdev,
struct rdac_dh_data *h)
{
struct request *rq;
struct rdac_mode_common *common;
unsigned data_size;
struct rdac_dh_data *h = get_rdac_data(sdev);
if (h->ctlr->use_ms10) {
struct rdac_pg_expanded *rdac_pg;
......@@ -277,6 +285,10 @@ static struct request *rdac_failover_get(struct scsi_device *sdev)
}
rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
rq->sense = h->sense;
memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
rq->sense_len = 0;
return rq;
}
......@@ -321,11 +333,10 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
}
static int submit_inquiry(struct scsi_device *sdev, int page_code,
unsigned int len)
unsigned int len, struct rdac_dh_data *h)
{
struct request *rq;
struct request_queue *q = sdev->request_queue;
struct rdac_dh_data *h = get_rdac_data(sdev);
int err = SCSI_DH_RES_TEMP_UNAVAIL;
rq = get_rdac_req(sdev, &h->inq, len, READ);
......@@ -338,59 +349,68 @@ static int submit_inquiry(struct scsi_device *sdev, int page_code,
rq->cmd[2] = page_code;
rq->cmd[4] = len;
rq->cmd_len = COMMAND_SIZE(INQUIRY);
rq->sense = h->sense;
memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
rq->sense_len = 0;
err = blk_execute_rq(q, NULL, rq, 1);
if (err == -EIO)
err = SCSI_DH_IO;
blk_put_request(rq);
done:
return err;
}
static int get_lun(struct scsi_device *sdev)
static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h)
{
int err;
struct c8_inquiry *inqp;
struct rdac_dh_data *h = get_rdac_data(sdev);
err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry));
err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c8;
h->lun = inqp->lun[7]; /* currently it uses only one byte */
if (inqp->page_code != 0xc8)
return SCSI_DH_NOSYS;
if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
return SCSI_DH_NOSYS;
h->lun = scsilun_to_int((struct scsi_lun *)inqp->lun);
}
return err;
}
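
The new code above converts the full 8-byte LUN field with scsilun_to_int() instead of reading a single byte; a hedged usage sketch of that helper (the wrapper below is illustrative only, not part of this patch):

#include <linux/string.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

/* Illustrative only: fold an 8-byte SCSI LUN field into a host integer. */
static int example_lun_from_bytes(const unsigned char *lun_bytes)
{
	struct scsi_lun lun;

	memcpy(&lun, lun_bytes, sizeof(lun));
	return scsilun_to_int(&lun);
}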
#define RDAC_OWNED 0
#define RDAC_UNOWNED 1
#define RDAC_FAILED 2
static int check_ownership(struct scsi_device *sdev)
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
int err;
struct c9_inquiry *inqp;
struct rdac_dh_data *h = get_rdac_data(sdev);
err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry));
err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
if (err == SCSI_DH_OK) {
err = RDAC_UNOWNED;
inqp = &h->inq.c9;
/*
* If in AVT mode or if the path already owns the LUN,
* return RDAC_OWNED;
*/
if (((inqp->avte_cvp >> 7) == 0x1) ||
((inqp->avte_cvp & 0x1) != 0))
err = RDAC_OWNED;
} else
err = RDAC_FAILED;
if ((inqp->avte_cvp >> 7) == 0x1) {
/* LUN in AVT mode */
sdev_printk(KERN_NOTICE, sdev,
"%s: AVT mode detected\n",
RDAC_NAME);
h->lun_state = RDAC_LUN_AVT;
} else if ((inqp->avte_cvp & 0x1) != 0) {
/* LUN was owned by the controller */
h->lun_state = RDAC_LUN_OWNED;
}
}
return err;
}
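
/*
 * Background (hedged): AVT here stands for Automatic Volume Transfer.  When
 * the array runs in AVT mode, LUN ownership follows I/O automatically, so a
 * path in this state should not need the explicit MODE SELECT failover that
 * rdac_activate() sends for RDAC_LUN_UNOWNED paths further below.
 */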
static int initialize_controller(struct scsi_device *sdev)
static int initialize_controller(struct scsi_device *sdev,
struct rdac_dh_data *h)
{
int err;
struct c4_inquiry *inqp;
struct rdac_dh_data *h = get_rdac_data(sdev);
err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry));
err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c4;
h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
......@@ -400,13 +420,12 @@ static int initialize_controller(struct scsi_device *sdev)
return err;
}
static int set_mode_select(struct scsi_device *sdev)
static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
int err;
struct c2_inquiry *inqp;
struct rdac_dh_data *h = get_rdac_data(sdev);
err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry));
err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c2;
/*
......@@ -421,13 +440,13 @@ static int set_mode_select(struct scsi_device *sdev)
return err;
}
static int mode_select_handle_sense(struct scsi_device *sdev)
static int mode_select_handle_sense(struct scsi_device *sdev,
unsigned char *sensebuf)
{
struct scsi_sense_hdr sense_hdr;
struct rdac_dh_data *h = get_rdac_data(sdev);
int sense, err = SCSI_DH_IO, ret;
ret = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
if (!ret)
goto done;
......@@ -451,14 +470,13 @@ static int mode_select_handle_sense(struct scsi_device *sdev)
return err;
}
static int send_mode_select(struct scsi_device *sdev)
static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
struct request *rq;
struct request_queue *q = sdev->request_queue;
struct rdac_dh_data *h = get_rdac_data(sdev);
int err = SCSI_DH_RES_TEMP_UNAVAIL;
rq = rdac_failover_get(sdev);
rq = rdac_failover_get(sdev, h);
if (!rq)
goto done;
......@@ -466,9 +484,11 @@ static int send_mode_select(struct scsi_device *sdev)
err = blk_execute_rq(q, NULL, rq, 1);
if (err != SCSI_DH_OK)
err = mode_select_handle_sense(sdev);
err = mode_select_handle_sense(sdev, h->sense);
if (err == SCSI_DH_OK)
h->state = RDAC_STATE_ACTIVE;
blk_put_request(rq);
done:
return err;
}
......@@ -478,38 +498,23 @@ static int rdac_activate(struct scsi_device *sdev)
struct rdac_dh_data *h = get_rdac_data(sdev);
int err = SCSI_DH_OK;
if (h->lun == UNINITIALIZED_LUN) {
err = get_lun(sdev);
if (err != SCSI_DH_OK)
goto done;
}
err = check_ownership(sdev);
switch (err) {
case RDAC_UNOWNED:
break;
case RDAC_OWNED:
err = SCSI_DH_OK;
goto done;
case RDAC_FAILED:
default:
err = SCSI_DH_IO;
err = check_ownership(sdev, h);
if (err != SCSI_DH_OK)
goto done;
}
if (!h->ctlr) {
err = initialize_controller(sdev);
err = initialize_controller(sdev, h);
if (err != SCSI_DH_OK)
goto done;
}
if (h->ctlr->use_ms10 == -1) {
err = set_mode_select(sdev);
err = set_mode_select(sdev, h);
if (err != SCSI_DH_OK)
goto done;
}
err = send_mode_select(sdev);
if (h->lun_state == RDAC_LUN_UNOWNED)
err = send_mode_select(sdev, h);
done:
return err;
}
......@@ -569,10 +574,7 @@ static int rdac_check_sense(struct scsi_device *sdev,
return SCSI_RETURN_NOT_HANDLED;
}
static const struct {
char *vendor;
char *model;
} rdac_dev_list[] = {
const struct scsi_dh_devlist rdac_dev_list[] = {
{"IBM", "1722"},
{"IBM", "1724"},
{"IBM", "1726"},
......@@ -590,89 +592,89 @@ static const struct {
{NULL, NULL},
};
static int rdac_bus_notify(struct notifier_block *, unsigned long, void *);
static int rdac_bus_attach(struct scsi_device *sdev);
static void rdac_bus_detach(struct scsi_device *sdev);
static struct scsi_device_handler rdac_dh = {
.name = RDAC_NAME,
.module = THIS_MODULE,
.nb.notifier_call = rdac_bus_notify,
.devlist = rdac_dev_list,
.prep_fn = rdac_prep_fn,
.check_sense = rdac_check_sense,
.attach = rdac_bus_attach,
.detach = rdac_bus_detach,
.activate = rdac_activate,
};
/*
* TODO: need some interface so we can set trespass values
*/
static int rdac_bus_notify(struct notifier_block *nb,
unsigned long action, void *data)
static int rdac_bus_attach(struct scsi_device *sdev)
{
struct device *dev = data;
struct scsi_device *sdev;
struct scsi_dh_data *scsi_dh_data;
struct rdac_dh_data *h;
int i, found = 0;
unsigned long flags;
int err;
if (!scsi_is_sdev_device(dev))
scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
RDAC_NAME);
return 0;
}
sdev = to_scsi_device(dev);
if (action == BUS_NOTIFY_ADD_DEVICE) {
for (i = 0; rdac_dev_list[i].vendor; i++) {
if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
strlen(rdac_dev_list[i].vendor)) &&
!strncmp(sdev->model, rdac_dev_list[i].model,
strlen(rdac_dev_list[i].model))) {
found = 1;
break;
}
}
if (!found)
goto out;
scsi_dh_data->scsi_dh = &rdac_dh;
h = (struct rdac_dh_data *) scsi_dh_data->buf;
h->lun = UNINITIALIZED_LUN;
h->state = RDAC_STATE_ACTIVE;
scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
RDAC_NAME);
goto out;
}
err = get_lun(sdev, h);
if (err != SCSI_DH_OK)
goto failed;
scsi_dh_data->scsi_dh = &rdac_dh;
h = (struct rdac_dh_data *) scsi_dh_data->buf;
h->lun = UNINITIALIZED_LUN;
h->state = RDAC_STATE_ACTIVE;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
sdev->scsi_dh_data = scsi_dh_data;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
try_module_get(THIS_MODULE);
sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", RDAC_NAME);
} else if (action == BUS_NOTIFY_DEL_DEVICE) {
if (sdev->scsi_dh_data == NULL ||
sdev->scsi_dh_data->scsi_dh != &rdac_dh)
goto out;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
scsi_dh_data = sdev->scsi_dh_data;
sdev->scsi_dh_data = NULL;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
h = (struct rdac_dh_data *) scsi_dh_data->buf;
if (h->ctlr)
kref_put(&h->ctlr->kref, release_controller);
kfree(scsi_dh_data);
module_put(THIS_MODULE);
sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", RDAC_NAME);
}
err = check_ownership(sdev, h);
if (err != SCSI_DH_OK)
goto failed;
if (!try_module_get(THIS_MODULE))
goto failed;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
sdev->scsi_dh_data = scsi_dh_data;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
sdev_printk(KERN_NOTICE, sdev,
"%s: LUN %d (%s)\n",
RDAC_NAME, h->lun, lun_state[(int)h->lun_state]);
out:
return 0;
failed:
kfree(scsi_dh_data);
sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
RDAC_NAME);
return -EINVAL;
}
static void rdac_bus_detach( struct scsi_device *sdev )
{
struct scsi_dh_data *scsi_dh_data;
struct rdac_dh_data *h;
unsigned long flags;
spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
scsi_dh_data = sdev->scsi_dh_data;
sdev->scsi_dh_data = NULL;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
h = (struct rdac_dh_data *) scsi_dh_data->buf;
if (h->ctlr)
kref_put(&h->ctlr->kref, release_controller);
kfree(scsi_dh_data);
module_put(THIS_MODULE);
sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME);
}
static int __init rdac_init(void)
{
int r;
......
......@@ -521,9 +521,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
scsi_block_requests(vhost->host);
ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
}
} else
vhost->reinit = 1;
......@@ -854,39 +855,41 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
}
/**
* __ibmvfc_find_target - Find the specified scsi_target (no locking)
* __ibmvfc_get_target - Find the specified scsi_target (no locking)
* @starget: scsi target struct
*
* Return value:
* ibmvfc_target struct / NULL if not found
**/
static struct ibmvfc_target *__ibmvfc_find_target(struct scsi_target *starget)
static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ibmvfc_host *vhost = shost_priv(shost);
struct ibmvfc_target *tgt;
list_for_each_entry(tgt, &vhost->targets, queue)
if (tgt->target_id == starget->id)
if (tgt->target_id == starget->id) {
kref_get(&tgt->kref);
return tgt;
}
return NULL;
}
/**
* ibmvfc_find_target - Find the specified scsi_target
* ibmvfc_get_target - Find the specified scsi_target
* @starget: scsi target struct
*
* Return value:
* ibmvfc_target struct / NULL if not found
**/
static struct ibmvfc_target *ibmvfc_find_target(struct scsi_target *starget)
static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ibmvfc_target *tgt;
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
tgt = __ibmvfc_find_target(starget);
tgt = __ibmvfc_get_target(starget);
spin_unlock_irqrestore(shost->host_lock, flags);
return tgt;
}
......@@ -963,6 +966,9 @@ static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
case IBMVFC_HALTED:
fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
break;
case IBMVFC_NO_CRQ:
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
break;
default:
ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
......@@ -987,6 +993,17 @@ static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
rport->dev_loss_tmo = 1;
}
/**
* ibmvfc_release_tgt - Free memory allocated for a target
* @kref: kref struct
*
**/
static void ibmvfc_release_tgt(struct kref *kref)
{
struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
kfree(tgt);
}
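
These changes pair every __ibmvfc_get_target() with a kref_put(..., ibmvfc_release_tgt) once the caller is done with the target; a minimal, self-contained sketch of that reference-counting pattern (generic names, not taken from this driver):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref kref;
};

static void example_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_obj, kref));
}

static void example_use(struct example_obj *obj)
{
	kref_get(&obj->kref);			/* take a reference while the object is in use */
	/* ... use obj ... */
	kref_put(&obj->kref, example_release);	/* drop it; frees on the last put */
}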
/**
* ibmvfc_get_starget_node_name - Get SCSI target's node name
* @starget: scsi target struct
......@@ -996,8 +1013,10 @@ static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
**/
static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
{
struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
if (tgt)
kref_put(&tgt->kref, ibmvfc_release_tgt);
}
/**
......@@ -1009,8 +1028,10 @@ static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
**/
static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
{
struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
if (tgt)
kref_put(&tgt->kref, ibmvfc_release_tgt);
}
/**
......@@ -1022,8 +1043,10 @@ static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
**/
static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
{
struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
if (tgt)
kref_put(&tgt->kref, ibmvfc_release_tgt);
}
/**
......@@ -1113,7 +1136,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
login_info->capabilities = IBMVFC_CAN_MIGRATE;
login_info->async.va = vhost->async_crq.msg_token;
login_info->async.len = vhost->async_crq.size;
login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs);
strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
strncpy(login_info->device_name,
vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME);
......@@ -1404,7 +1427,7 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
err = cmd_status[index].name;
}
if (!logerr && (vhost->log_level <= IBMVFC_DEFAULT_LOG_LEVEL))
if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
return;
if (rsp->flags & FCP_RSP_LEN_VALID)
......@@ -2054,7 +2077,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
{
const char *desc = ibmvfc_get_ae_desc(crq->event);
ibmvfc_log(vhost, 2, "%s event received\n", desc);
ibmvfc_log(vhost, 3, "%s event received\n", desc);
switch (crq->event) {
case IBMVFC_AE_LINK_UP:
......@@ -2647,17 +2670,6 @@ static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
ibmvfc_init_tgt(tgt, job_step);
}
/**
* ibmvfc_release_tgt - Free memory allocated for a target
* @kref: kref struct
*
**/
static void ibmvfc_release_tgt(struct kref *kref)
{
struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
kfree(tgt);
}
/**
* ibmvfc_tgt_prli_done - Completion handler for Process Login
* @evt: ibmvfc event struct
......@@ -2901,6 +2913,139 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
tgt_dbg(tgt, "Sent Implicit Logout\n");
}
/**
* ibmvfc_adisc_needs_plogi - Does device need PLOGI?
* @mad: ibmvfc passthru mad struct
* @tgt: ibmvfc target struct
*
* Returns:
* 1 if PLOGI needed / 0 if PLOGI not needed
**/
static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
struct ibmvfc_target *tgt)
{
if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name,
sizeof(tgt->ids.port_name)))
return 1;
if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name,
sizeof(tgt->ids.node_name)))
return 1;
if (mad->fc_iu.response[6] != tgt->scsi_id)
return 1;
return 0;
}
/**
* ibmvfc_tgt_adisc_done - Completion handler for ADISC
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
{
struct ibmvfc_target *tgt = evt->tgt;
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
u32 status = mad->common.status;
u8 fc_reason, fc_explain;
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
switch (status) {
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "ADISC succeeded\n");
if (ibmvfc_adisc_needs_plogi(mad, tgt))
tgt->need_login = 1;
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
case IBMVFC_MAD_FAILED:
default:
tgt->need_login = 1;
fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(mad->iu.status, mad->iu.error),
mad->iu.status, mad->iu.error,
ibmvfc_get_fc_type(fc_reason), fc_reason,
ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
break;
};
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
wake_up(&vhost->work_wait_q);
}
/**
* ibmvfc_init_passthru - Initialize an event struct for FC passthru
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
{
struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
memset(mad, 0, sizeof(*mad));
mad->common.version = 1;
mad->common.opcode = IBMVFC_PASSTHRU;
mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
mad->cmd_ioba.va = (u64)evt->crq.ioba +
offsetof(struct ibmvfc_passthru_mad, iu);
mad->cmd_ioba.len = sizeof(mad->iu);
mad->iu.cmd_len = sizeof(mad->fc_iu.payload);
mad->iu.rsp_len = sizeof(mad->fc_iu.response);
mad->iu.cmd.va = (u64)evt->crq.ioba +
offsetof(struct ibmvfc_passthru_mad, fc_iu) +
offsetof(struct ibmvfc_passthru_fc_iu, payload);
mad->iu.cmd.len = sizeof(mad->fc_iu.payload);
mad->iu.rsp.va = (u64)evt->crq.ioba +
offsetof(struct ibmvfc_passthru_mad, fc_iu) +
offsetof(struct ibmvfc_passthru_fc_iu, response);
mad->iu.rsp.len = sizeof(mad->fc_iu.response);
}
/**
* ibmvfc_tgt_adisc - Initiate an ADISC for specified target
* @tgt: ibmvfc target struct
*
**/
static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
{
struct ibmvfc_passthru_mad *mad;
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
if (vhost->discovery_threads >= disc_threads)
return;
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
vhost->discovery_threads++;
ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
ibmvfc_init_passthru(evt);
mad = &evt->iu.passthru;
mad->iu.flags = IBMVFC_FC_ELS;
mad->iu.scsi_id = tgt->scsi_id;
mad->fc_iu.payload[0] = IBMVFC_ADISC;
memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
sizeof(vhost->login_buf->resp.port_name));
memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
sizeof(vhost->login_buf->resp.node_name));
mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
kref_put(&tgt->kref, ibmvfc_release_tgt);
} else
tgt_dbg(tgt, "Sent ADISC\n");
}
/**
* ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
* @evt: ibmvfc event struct
......@@ -2921,6 +3066,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
tgt->new_scsi_id = rsp->scsi_id;
if (rsp->scsi_id != tgt->scsi_id)
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
else
ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
......@@ -3336,6 +3483,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
tgt_dbg(tgt, "rport add succeeded\n");
rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
rport->supported_classes = 0;
tgt->target_id = rport->scsi_target_id;
if (tgt->service_parms.class1_parms[0] & 0x80000000)
rport->supported_classes |= FC_COS_CLASS1;
if (tgt->service_parms.class2_parms[0] & 0x80000000)
......@@ -3800,10 +3948,12 @@ static int ibmvfc_remove(struct vio_dev *vdev)
ENTER;
ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
ibmvfc_wait_while_resetting(vhost);
ibmvfc_release_crq_queue(vhost);
kthread_stop(vhost->work_thread);
fc_remove_host(vhost->host);
scsi_remove_host(vhost->host);
ibmvfc_release_crq_queue(vhost);
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_purge_requests(vhost, DID_ERROR);
......
......@@ -29,8 +29,8 @@
#include "viosrp.h"
#define IBMVFC_NAME "ibmvfc"
#define IBMVFC_DRIVER_VERSION "1.0.0"
#define IBMVFC_DRIVER_DATE "(July 1, 2008)"
#define IBMVFC_DRIVER_VERSION "1.0.1"
#define IBMVFC_DRIVER_DATE "(July 11, 2008)"
#define IBMVFC_DEFAULT_TIMEOUT 15
#define IBMVFC_INIT_TIMEOUT 30
......@@ -119,6 +119,7 @@ enum ibmvfc_mad_types {
IBMVFC_PROCESS_LOGIN = 0x0008,
IBMVFC_QUERY_TARGET = 0x0010,
IBMVFC_IMPLICIT_LOGOUT = 0x0040,
IBMVFC_PASSTHRU = 0x0200,
IBMVFC_TMF_MAD = 0x0100,
};
......@@ -439,6 +440,37 @@ struct ibmvfc_cmd {
struct ibmvfc_fcp_rsp rsp;
}__attribute__((packed, aligned (8)));
struct ibmvfc_passthru_fc_iu {
u32 payload[7];
#define IBMVFC_ADISC 0x52000000
u32 response[7];
};
struct ibmvfc_passthru_iu {
u64 task_tag;
u32 cmd_len;
u32 rsp_len;
u16 status;
u16 error;
u32 flags;
#define IBMVFC_FC_ELS 0x01
u32 cancel_key;
u32 reserved;
struct srp_direct_buf cmd;
struct srp_direct_buf rsp;
u64 correlation;
u64 scsi_id;
u64 tag;
u64 reserved2[2];
}__attribute__((packed, aligned (8)));
struct ibmvfc_passthru_mad {
struct ibmvfc_mad_common common;
struct srp_direct_buf cmd_ioba;
struct ibmvfc_passthru_iu iu;
struct ibmvfc_passthru_fc_iu fc_iu;
}__attribute__((packed, aligned (8)));
struct ibmvfc_trace_start_entry {
u32 xfer_len;
}__attribute__((packed));
......@@ -531,6 +563,7 @@ union ibmvfc_iu {
struct ibmvfc_implicit_logout implicit_logout;
struct ibmvfc_tmf tmf;
struct ibmvfc_cmd cmd;
struct ibmvfc_passthru_mad passthru;
}__attribute__((packed, aligned (8)));
enum ibmvfc_target_action {
......@@ -656,6 +689,9 @@ struct ibmvfc_host {
#define tgt_dbg(t, fmt, ...) \
DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
#define tgt_info(t, fmt, ...) \
dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
#define tgt_err(t, fmt, ...) \
dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
......@@ -668,8 +704,8 @@ struct ibmvfc_host {
dev_err((vhost)->dev, ##__VA_ARGS__); \
} while (0)
#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __FUNCTION__))
#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __FUNCTION__))
#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __func__))
#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __func__))
#ifdef CONFIG_SCSI_IBMVFC_TRACE
#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
......
......@@ -55,7 +55,7 @@
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...) \
do { \
printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
} while (0)
/* #define dprintk eprintk */
#define dprintk(fmt, args...)
......
......@@ -163,7 +163,7 @@ static int imm_proc_info(struct Scsi_Host *host, char *buffer, char **start,
#if IMM_DEBUG > 0
#define imm_fail(x,y) printk("imm: imm_fail(%i) from %s at line %d\n",\
y, __FUNCTION__, __LINE__); imm_fail_func(x,y);
y, __func__, __LINE__); imm_fail_func(x,y);
static inline void
imm_fail_func(imm_struct *dev, int error_code)
#else
......
......@@ -1403,10 +1403,10 @@ struct ipr_ucode_image_header {
}
#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
__FILE__, __FUNCTION__, __LINE__)
__FILE__, __func__, __LINE__)
#define ENTER IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Entering %s\n", __FUNCTION__))
#define LEAVE IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Leaving %s\n", __FUNCTION__))
#define ENTER IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Entering %s\n", __func__))
#define LEAVE IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Leaving %s\n", __func__))
#define ipr_err_separator \
ipr_err("----------------------------------------------------------\n")
......
......@@ -74,7 +74,7 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
case SAS_OPEN_TO:
case SAS_OPEN_REJECT:
SAS_DPRINTK("%s: Saw error %d. What to do?\n",
__FUNCTION__, ts->stat);
__func__, ts->stat);
return AC_ERR_OTHER;
case SAS_ABORTED_TASK:
......@@ -115,7 +115,7 @@ static void sas_ata_task_done(struct sas_task *task)
} else if (stat->stat != SAM_STAT_GOOD) {
ac = sas_to_ata_err(stat);
if (ac) {
SAS_DPRINTK("%s: SAS error %x\n", __FUNCTION__,
SAS_DPRINTK("%s: SAS error %x\n", __func__,
stat->stat);
/* We saw a SAS error. Send a vague error. */
qc->err_mask = ac;
......@@ -244,20 +244,20 @@ static void sas_ata_phy_reset(struct ata_port *ap)
res = i->dft->lldd_I_T_nexus_reset(dev);
if (res != TMF_RESP_FUNC_COMPLETE)
SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__);
SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__);
switch (dev->sata_dev.command_set) {
case ATA_COMMAND_SET:
SAS_DPRINTK("%s: Found ATA device.\n", __FUNCTION__);
SAS_DPRINTK("%s: Found ATA device.\n", __func__);
ap->link.device[0].class = ATA_DEV_ATA;
break;
case ATAPI_COMMAND_SET:
SAS_DPRINTK("%s: Found ATAPI device.\n", __FUNCTION__);
SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
ap->link.device[0].class = ATA_DEV_ATAPI;
break;
default:
SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
__FUNCTION__,
__func__,
dev->sata_dev.command_set);
ap->link.device[0].class = ATA_DEV_UNKNOWN;
break;
......@@ -299,7 +299,7 @@ static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
{
struct domain_device *dev = ap->private_data;
SAS_DPRINTK("STUB %s\n", __FUNCTION__);
SAS_DPRINTK("STUB %s\n", __func__);
switch (sc_reg_in) {
case SCR_STATUS:
dev->sata_dev.sstatus = val;
......@@ -324,7 +324,7 @@ static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
{
struct domain_device *dev = ap->private_data;
SAS_DPRINTK("STUB %s\n", __FUNCTION__);
SAS_DPRINTK("STUB %s\n", __func__);
switch (sc_reg_in) {
case SCR_STATUS:
*val = dev->sata_dev.sstatus;
......
......@@ -121,7 +121,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
break;
} else {
SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
"status 0x%x\n", __FUNCTION__,
"status 0x%x\n", __func__,
SAS_ADDR(dev->sas_addr),
task->task_status.resp,
task->task_status.stat);
......@@ -1279,7 +1279,7 @@ static int sas_configure_present(struct domain_device *dev, int phy_id,
goto out;
} else if (res != SMP_RESP_FUNC_ACC) {
SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x "
"result 0x%x\n", __FUNCTION__,
"result 0x%x\n", __func__,
SAS_ADDR(dev->sas_addr), phy_id, i, res);
goto out;
}
......@@ -1901,7 +1901,7 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
if (!rsp) {
printk("%s: space for a smp response is missing\n",
__FUNCTION__);
__func__);
return -EINVAL;
}
......@@ -1914,20 +1914,20 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
if (type != SAS_EDGE_EXPANDER_DEVICE &&
type != SAS_FANOUT_EXPANDER_DEVICE) {
printk("%s: can we send a smp request to a device?\n",
__FUNCTION__);
__func__);
return -EINVAL;
}
dev = sas_find_dev_by_rphy(rphy);
if (!dev) {
printk("%s: fail to find a domain_device?\n", __FUNCTION__);
printk("%s: fail to find a domain_device?\n", __func__);
return -EINVAL;
}
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk("%s: multiple segments req %u %u, rsp %u %u\n",
__FUNCTION__, req->bio->bi_vcnt, req->data_len,
__func__, req->bio->bi_vcnt, req->data_len,
rsp->bio->bi_vcnt, rsp->data_len);
return -EINVAL;
}
......
......@@ -50,7 +50,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
sas_deform_port(phy);
else {
SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
__FUNCTION__, phy->id, phy->port->id,
__func__, phy->id, phy->port->id,
phy->port->num_phys);
return;
}
......@@ -78,7 +78,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (i >= sas_ha->num_phys) {
printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
__FUNCTION__);
__func__);
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
return;
}
......
......@@ -343,7 +343,7 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
flags);
SAS_DPRINTK("%s: task 0x%p aborted from "
"task_queue\n",
__FUNCTION__, task);
__func__, task);
return TASK_IS_ABORTED;
}
}
......@@ -351,13 +351,13 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
}
for (i = 0; i < 5; i++) {
SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
res = si->dft->lldd_abort_task(task);
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
return TASK_IS_DONE;
}
......@@ -365,24 +365,24 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
if (res == TMF_RESP_FUNC_COMPLETE) {
SAS_DPRINTK("%s: task 0x%p is aborted\n",
__FUNCTION__, task);
__func__, task);
return TASK_IS_ABORTED;
} else if (si->dft->lldd_query_task) {
SAS_DPRINTK("%s: querying task 0x%p\n",
__FUNCTION__, task);
__func__, task);
res = si->dft->lldd_query_task(task);
switch (res) {
case TMF_RESP_FUNC_SUCC:
SAS_DPRINTK("%s: task 0x%p at LU\n",
__FUNCTION__, task);
__func__, task);
return TASK_IS_AT_LU;
case TMF_RESP_FUNC_COMPLETE:
SAS_DPRINTK("%s: task 0x%p not at LU\n",
__FUNCTION__, task);
__func__, task);
return TASK_IS_NOT_AT_LU;
case TMF_RESP_FUNC_FAILED:
SAS_DPRINTK("%s: task 0x%p failed to abort\n",
__FUNCTION__, task);
__func__, task);
return TASK_ABORT_FAILED;
}
......@@ -545,7 +545,7 @@ static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
if (need_reset) {
SAS_DPRINTK("%s: task 0x%p requests reset\n",
__FUNCTION__, task);
__func__, task);
goto reset;
}
......@@ -556,13 +556,13 @@ static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
switch (res) {
case TASK_IS_DONE:
SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_ABORTED:
SAS_DPRINTK("%s: task 0x%p is aborted\n",
__FUNCTION__, task);
__func__, task);
sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_AT_LU:
......@@ -633,7 +633,7 @@ static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
}
return list_empty(work_q);
clear_q:
SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__);
SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
list_for_each_entry_safe(cmd, n, work_q, eh_entry)
sas_eh_finish_cmd(cmd);
......@@ -650,7 +650,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
list_splice_init(&shost->eh_cmd_q, &eh_work_q);
spin_unlock_irqrestore(shost->host_lock, flags);
SAS_DPRINTK("Enter %s\n", __FUNCTION__);
SAS_DPRINTK("Enter %s\n", __func__);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
* complete via the normal sas_task completion mechanism)
......@@ -669,7 +669,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
out:
scsi_eh_flush_done_q(&ha->eh_done_q);
SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
SAS_DPRINTK("--- Exit %s\n", __func__);
return;
}
......@@ -990,7 +990,7 @@ int __sas_task_abort(struct sas_task *task)
if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("%s: Task %p already finished.\n", __FUNCTION__,
SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
task);
return 0;
}
......
......@@ -39,7 +39,7 @@ enum srp_task_attributes {
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...) \
do { \
printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \
printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
} while (0)
/* #define dprintk eprintk */
#define dprintk(fmt, args...)
......
......@@ -2083,7 +2083,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
if (iocbq_entry == NULL) {
printk(KERN_ERR "%s: only allocated %d iocbs of "
"expected %d count. Unloading driver.\n",
__FUNCTION__, i, LPFC_IOCB_LIST_CNT);
__func__, i, LPFC_IOCB_LIST_CNT);
error = -ENOMEM;
goto out_free_iocbq;
}
......@@ -2093,7 +2093,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
kfree (iocbq_entry);
printk(KERN_ERR "%s: failed to allocate IOTAG. "
"Unloading driver.\n",
__FUNCTION__);
__func__);
error = -ENOMEM;
goto out_free_iocbq;
}
......
......@@ -341,7 +341,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
printk(KERN_ERR "%s: Too many sg segments from "
"dma_map_sg. Config %d, seg_cnt %d",
__FUNCTION__, phba->cfg_sg_seg_cnt,
__func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
scsi_dma_unmap(scsi_cmnd);
return 1;
......
......@@ -219,7 +219,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case CMD_IOCB_LOGENTRY_CN:
case CMD_IOCB_LOGENTRY_ASYNC_CN:
printk("%s - Unhandled SLI-3 Command x%x\n",
__FUNCTION__, iocb_cmnd);
__func__, iocb_cmnd);
type = LPFC_UNKNOWN_IOCB;
break;
default:
......@@ -1715,7 +1715,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
rspiocbp = __lpfc_sli_get_iocbq(phba);
if (rspiocbp == NULL) {
printk(KERN_ERR "%s: out of buffers! Failing "
"completion.\n", __FUNCTION__);
"completion.\n", __func__);
break;
}
......@@ -3793,7 +3793,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
break;
default:
printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
__FUNCTION__, ctx_cmd);
__func__, ctx_cmd);
break;
}
......
......@@ -265,7 +265,7 @@ typedef struct {
#define ASSERT(expression) \
if (!(expression)) { \
ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n", \
#expression, __FILE__, __LINE__, __FUNCTION__); \
#expression, __FILE__, __LINE__, __func__); \
}
#else
#define ASSERT(expression)
......
......@@ -458,7 +458,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (adapter == NULL) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d.\n", __FUNCTION__, __LINE__));
"megaraid: out of memory, %s %d.\n", __func__, __LINE__));
goto out_probe_one;
}
......@@ -1002,7 +1002,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
if (!raid_dev->una_mbox64) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d\n", __FUNCTION__,
"megaraid: out of memory, %s %d\n", __func__,
__LINE__));
return -1;
}
......@@ -1030,7 +1030,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
if (!adapter->ibuf) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d\n", __FUNCTION__,
"megaraid: out of memory, %s %d\n", __func__,
__LINE__));
goto out_free_common_mbox;
......@@ -1052,7 +1052,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
if (adapter->kscb_list == NULL) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d\n", __FUNCTION__,
"megaraid: out of memory, %s %d\n", __func__,
__LINE__));
goto out_free_ibuf;
}
......@@ -1060,7 +1060,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
// memory allocation for our command packets
if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d\n", __FUNCTION__,
"megaraid: out of memory, %s %d\n", __func__,
__LINE__));
goto out_free_scb_list;
}
......@@ -2981,7 +2981,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
if (pinfo == NULL) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d\n", __FUNCTION__,
"megaraid: out of memory, %s %d\n", __func__,
__LINE__));
return -1;
......@@ -3508,7 +3508,7 @@ megaraid_cmm_register(adapter_t *adapter)
if (adapter->uscb_list == NULL) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d\n", __FUNCTION__,
"megaraid: out of memory, %s %d\n", __func__,
__LINE__));
return -1;
}
......@@ -3879,7 +3879,7 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
!raid_dev->sysfs_buffer) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d\n", __FUNCTION__,
"megaraid: out of memory, %s %d\n", __func__,
__LINE__));
rval = -ENOMEM;
......
......@@ -929,7 +929,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
!adapter->pthru_dma_pool) {
con_log(CL_ANN, (KERN_WARNING
"megaraid cmm: out of memory, %s %d\n", __FUNCTION__,
"megaraid cmm: out of memory, %s %d\n", __func__,
__LINE__));
rval = (-ENOMEM);
......@@ -957,7 +957,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
con_log(CL_ANN, (KERN_WARNING
"megaraid cmm: out of memory, %s %d\n",
__FUNCTION__, __LINE__));
__func__, __LINE__));
rval = (-ENOMEM);
......
......@@ -299,9 +299,9 @@ static struct scsi_host_template nsp32_template = {
#else
# define NSP32_DEBUG_MASK 0xffffff
# define nsp32_msg(type, args...) \
nsp32_message (__FUNCTION__, __LINE__, (type), args)
nsp32_message (__func__, __LINE__, (type), args)
# define nsp32_dbg(mask, args...) \
nsp32_dmessage(__FUNCTION__, __LINE__, (mask), args)
nsp32_dmessage(__func__, __LINE__, (mask), args)
#endif
#define NSP32_DEBUG_QUEUECOMMAND BIT(0)
......
......@@ -88,7 +88,7 @@ static void print_commandk (unsigned char *command)
int i,s;
// printk(KERN_DEBUG);
print_opcodek(command[0]);
/*printk(KERN_DEBUG "%s ", __FUNCTION__);*/
/*printk(KERN_DEBUG "%s ", __func__);*/
if ((command[0] >> 5) == 6 ||
(command[0] >> 5) == 7 ) {
s = 12; /* vender specific */
......
......@@ -107,9 +107,9 @@ static nsp_hw_data nsp_data_base; /* attach <-> detect glue */
#else
# define NSP_DEBUG_MASK 0xffffff
# define nsp_msg(type, args...) \
nsp_cs_message (__FUNCTION__, __LINE__, (type), args)
nsp_cs_message (__func__, __LINE__, (type), args)
# define nsp_dbg(mask, args...) \
nsp_cs_dmessage(__FUNCTION__, __LINE__, (mask), args)
nsp_cs_dmessage(__func__, __LINE__, (mask), args)
#endif
#define NSP_DEBUG_QUEUECOMMAND BIT(0)
......
......@@ -90,7 +90,7 @@ static void print_commandk (unsigned char *command)
int i, s;
printk(KERN_DEBUG);
print_opcodek(command[0]);
/*printk(KERN_DEBUG "%s ", __FUNCTION__);*/
/*printk(KERN_DEBUG "%s ", __func__);*/
if ((command[0] >> 5) == 6 ||
(command[0] >> 5) == 7 ) {
s = 12; /* vender specific */
......
......@@ -171,7 +171,7 @@ static int device_check(ppa_struct *dev);
#if PPA_DEBUG > 0
#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\
y, __FUNCTION__, __LINE__); ppa_fail_func(x,y);
y, __func__, __LINE__); ppa_fail_func(x,y);
static inline void ppa_fail_func(ppa_struct *dev, int error_code)
#else
static inline void ppa_fail(ppa_struct *dev, int error_code)
......
......@@ -1695,7 +1695,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen;
dprintk(1, "%s: DMA RISC code (%i) words\n",
__FUNCTION__, risc_code_size);
__func__, risc_code_size);
num = 0;
while (risc_code_size > 0) {
......@@ -1721,7 +1721,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
__FUNCTION__, mb[0],
__func__, mb[0],
(void *)(long)ha->request_dma,
mb[6], mb[7], mb[2], mb[3]);
err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
......@@ -1753,10 +1753,10 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
if (tbuf[i] != sp[i] && warn++ < 10) {
printk(KERN_ERR "%s: FW compare error @ "
"byte(0x%x) loop#=%x\n",
__FUNCTION__, i, num);
__func__, i, num);
printk(KERN_ERR "%s: FWbyte=%x "
"FWfromChip=%x\n",
__FUNCTION__, sp[i], tbuf[i]);
__func__, sp[i], tbuf[i]);
/*break; */
}
}
......@@ -1781,7 +1781,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
int err;
dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
__FUNCTION__);
__func__);
/* Verify checksum of loaded RISC code. */
mb[0] = MBC_VERIFY_CHECKSUM;
......@@ -1794,7 +1794,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
}
/* Start firmware execution. */
dprintk(1, "%s: start firmware running.\n", __FUNCTION__);
dprintk(1, "%s: start firmware running.\n", __func__);
mb[0] = MBC_EXECUTE_FIRMWARE;
mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
......
(This diff has been collapsed.)
......@@ -216,7 +216,7 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
static int
qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram,
uint16_t ram_words, void **nxt)
uint32_t ram_words, void **nxt)
{
int rval;
uint32_t cnt, stat, timer, words, idx;
......
......@@ -864,7 +864,8 @@ struct link_statistics {
uint32_t prim_seq_err_cnt;
uint32_t inval_xmit_word_cnt;
uint32_t inval_crc_cnt;
uint32_t unused1[0x1b];
uint32_t lip_cnt;
uint32_t unused1[0x1a];
uint32_t tx_frames;
uint32_t rx_frames;
uint32_t dumped_frames;
......@@ -1544,7 +1545,6 @@ typedef struct fc_port {
int login_retry;
atomic_t port_down_timer;
spinlock_t rport_lock;
struct fc_rport *rport, *drport;
u32 supported_classes;
......@@ -2155,6 +2155,10 @@ struct qla_chip_state_84xx {
uint32_t gold_fw_version;
};
struct qla_statistics {
uint32_t total_isp_aborts;
};
/*
* Linux Host Adapter structure
*/
......@@ -2166,7 +2170,6 @@ typedef struct scsi_qla_host {
struct pci_dev *pdev;
unsigned long host_no;
unsigned long instance;
volatile struct {
uint32_t init_done :1;
......@@ -2515,7 +2518,7 @@ typedef struct scsi_qla_host {
uint8_t model_number[16+1];
#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
char *model_desc;
char model_desc[80];
uint8_t adapter_id[16+1];
uint8_t *node_name;
......@@ -2596,6 +2599,7 @@ typedef struct scsi_qla_host {
int cur_vport_count;
struct qla_chip_state_84xx *cs84xx;
struct qla_statistics qla_stats;
} scsi_qla_host_t;
......
......@@ -62,7 +62,7 @@ extern int ql2xfdmienable;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xqfullrampup;
extern int num_hosts;
extern int ql2xiidmaenable;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
......@@ -71,6 +71,8 @@ extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
uint16_t, uint16_t);
extern void qla2x00_abort_fcport_cmds(fc_port_t *);
/*
* Global Functions in qla_mid.c source file.
*/
......@@ -312,6 +314,7 @@ extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
uint16_t, uint16_t);
extern void qla2xxx_get_flash_info(scsi_qla_host_t *);
extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
/*
* Global Function Prototypes in qla_dbg.c source file.
......
......@@ -1661,6 +1661,12 @@ qla2x00_fdmi_register(scsi_qla_host_t *ha)
{
int rval;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
DEBUG2(printk("scsi(%ld): FDMI unsupported on "
"ISP2100/ISP2200.\n", ha->host_no));
return QLA_SUCCESS;
}
rval = qla2x00_mgmt_svr_login(ha);
if (rval)
return rval;
......
(These diffs have been collapsed.)
......@@ -542,10 +542,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
break;
case MBA_PORT_UPDATE: /* Port database update */
/* Only handle SCNs for our Vport index. */
if (ha->parent && ha->vp_idx != (mb[3] & 0xff))
break;
/*
* If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET
* event etc. earlier indicating loop is down) then process
......
......@@ -918,6 +918,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
rval = qla2x00_mailbox_command(ha, mcp);
if (mcp->mb[0] == MBS_COMMAND_ERROR)
rval = QLA_COMMAND_ERROR;
else if (mcp->mb[0] == MBS_INVALID_COMMAND)
rval = QLA_INVALID_COMMAND;
/* Return data. */
*id = mcp->mb[1];
......@@ -2161,17 +2163,18 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
struct abort_entry_24xx *abt;
dma_addr_t abt_dma;
uint32_t handle;
scsi_qla_host_t *pha = to_qla_parent(ha);
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
fcport = sp->fcport;
spin_lock_irqsave(&ha->hardware_lock, flags);
spin_lock_irqsave(&pha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
if (ha->outstanding_cmds[handle] == sp)
if (pha->outstanding_cmds[handle] == sp)
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
spin_unlock_irqrestore(&pha->hardware_lock, flags);
if (handle == MAX_OUTSTANDING_COMMANDS) {
/* Command not found. */
return QLA_FUNCTION_FAILED;
......
(The remaining file diffs have been collapsed.)