Commit 8ae6d9c7 authored by Giridhar Malavali, committed by James Bottomley

[SCSI] qla2xxx: Enhancements to support ISPFx00.

[jejb: fix up checkpatch issues]
Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
Signed-off-by: Armen Baloyan <armen.baloyan@qlogic.com>
Signed-off-by: Giridhar Malavali <giridhar.malavali@qlogic.com>
Signed-off-by: Saurav Kashyap <saurav.kashyap@qlogic.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Parent 0ce2d534
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
qla_nx.o qla_target.o
qla_nx.o qla_mr.o qla_target.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
......@@ -888,7 +888,10 @@ qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
struct qla_hw_data *ha = vha->hw;
uint32_t sn;
if (IS_FWI2_CAPABLE(ha)) {
if (IS_QLAFX00(vha->hw)) {
return snprintf(buf, PAGE_SIZE, "%s\n",
vha->hw->mr.serial_num);
} else if (IS_FWI2_CAPABLE(ha)) {
qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
return snprintf(buf, PAGE_SIZE, "%s\n", buf);
}
......@@ -912,6 +915,11 @@ qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
if (IS_QLAFX00(vha->hw))
return snprintf(buf, PAGE_SIZE, "%s\n",
vha->hw->mr.hw_version);
return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
ha->product_id[0], ha->product_id[1], ha->product_id[2],
ha->product_id[3]);
......@@ -922,6 +930,11 @@ qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
if (IS_QLAFX00(vha->hw))
return snprintf(buf, PAGE_SIZE, "%s\n",
vha->hw->mr.product_name);
return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
}
......@@ -1304,6 +1317,12 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
int rval = QLA_FUNCTION_FAILED;
uint16_t state[5];
uint32_t pstate;
if (IS_QLAFX00(vha->hw)) {
pstate = qlafx00_fw_state_show(dev, attr, buf);
return snprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
}
if (qla2x00_reset_active(vha))
ql_log(ql_log_warn, vha, 0x707c,
......@@ -1454,6 +1473,11 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
(shost_priv(shost)))->hw;
u32 speed = FC_PORTSPEED_UNKNOWN;
if (IS_QLAFX00(ha)) {
qlafx00_get_host_speed(shost);
return;
}
switch (ha->link_data_rate) {
case PORT_SPEED_1GB:
speed = FC_PORTSPEED_1GBIT;
......@@ -1637,6 +1661,9 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
{
scsi_qla_host_t *vha = shost_priv(shost);
if (IS_QLAFX00(vha->hw))
return 0;
qla2x00_loop_reset(vha);
return 0;
}
......@@ -1655,6 +1682,9 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat = &vha->fc_host_stat;
memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
if (IS_QLAFX00(vha->hw))
goto done;
if (test_bit(UNLOADING, &vha->dpc_flags))
goto done;
......@@ -2087,6 +2117,9 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
FC_PORTSPEED_1GBIT;
else if (IS_QLA23XX(ha))
speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
else if (IS_QLAFX00(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
else
speed = FC_PORTSPEED_1GBIT;
fc_host_supported_speeds(vha->host) = speed;
......
......@@ -30,14 +30,31 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
struct scsi_qla_host *vha = sp->fcport->vha;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
struct qla_hw_data *ha = vha->hw;
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (sp->type == SRB_FXIOCB_BCMD) {
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
&bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
dma_unmap_sg(&ha->pdev->dev,
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
dma_unmap_sg(&ha->pdev->dev,
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
} else {
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
}
if (sp->type == SRB_CT_CMD ||
sp->type == SRB_FXIOCB_BCMD ||
sp->type == SRB_ELS_CMD_HST)
kfree(sp->fcport);
qla2x00_rel_sp(vha, sp);
......@@ -1882,6 +1899,128 @@ qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
return 0;
}
static int
qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
struct Scsi_Host *host = bsg_job->shost;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = (DRIVER_ERROR << 16);
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
srb_t *sp;
int req_sg_cnt = 0, rsp_sg_cnt = 0;
struct fc_port *fcport;
char *type = "FC_BSG_HST_FX_MGMT";
/* Copy the IOCB specific information */
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
&bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
/* Dump the vendor information */
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
(uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
if (!vha->flags.online) {
ql_log(ql_log_warn, vha, 0x70d0,
"Host is not online.\n");
rval = -EIO;
goto done;
}
if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
req_sg_cnt = dma_map_sg(&ha->pdev->dev,
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!req_sg_cnt) {
ql_log(ql_log_warn, vha, 0x70c7,
"dma_map_sg return %d for request\n", req_sg_cnt);
rval = -ENOMEM;
goto done;
}
}
if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (!rsp_sg_cnt) {
ql_log(ql_log_warn, vha, 0x70c8,
"dma_map_sg return %d for reply\n", rsp_sg_cnt);
rval = -ENOMEM;
goto done_unmap_req_sg;
}
}
ql_dbg(ql_dbg_user, vha, 0x70c9,
"request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
"dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
/* Allocate a dummy fcport structure, since functions preparing the
* IOCB and mailbox command retrieves port specific information
* from fcport structure. For Host based ELS commands there will be
* no fcport structure allocated
*/
fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
ql_log(ql_log_warn, vha, 0x70ca,
"Failed to allocate fcport.\n");
rval = -ENOMEM;
goto done_unmap_rsp_sg;
}
/* Alloc SRB structure */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
ql_log(ql_log_warn, vha, 0x70cb,
"qla2x00_get_sp failed.\n");
rval = -ENOMEM;
goto done_free_fcport;
}
/* Initialize all required fields of fcport */
fcport->vha = vha;
fcport->loop_id = piocb_rqst->dataword;
sp->type = SRB_FXIOCB_BCMD;
sp->name = "bsg_fx_mgmt";
sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
sp->u.bsg_job = bsg_job;
sp->free = qla2x00_bsg_sp_free;
sp->done = qla2x00_bsg_job_done;
ql_dbg(ql_dbg_user, vha, 0x70cc,
"bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
type, piocb_rqst->func_type, fcport->loop_id);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x70cd,
"qla2x00_start_sp failed=%d.\n", rval);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
goto done_free_fcport;
}
return rval;
done_free_fcport:
kfree(fcport);
done_unmap_rsp_sg:
if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
dma_unmap_sg(&ha->pdev->dev,
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
dma_unmap_sg(&ha->pdev->dev,
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:
return rval;
}
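For reference, below is a minimal user-space sketch of how the new QL_VND_FX00_MGMT_CMD vendor command could be submitted through the FC BSG node this handler serves. The sg_io_v4 / fc_bsg_request plumbing is the standard BSG transport interface; the /dev/bsg device path, the 64-byte headroom reserved for the management IOCB payload, and the treatment of struct qla_mt_iocb_rqst_fx00 (defined in qla_mr.h, not shown in this diff) as an opaque caller-prepared blob are illustrative assumptions, not part of the patch.
/* Sketch only: submit QL_VND_FX00_MGMT_CMD via the FC BSG interface. */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>           /* struct sg_io_v4 */
#include <scsi/sg.h>             /* SG_IO */
#include <scsi/scsi_bsg_fc.h>    /* struct fc_bsg_request, FC_BSG_HST_VENDOR */
#define QL_VND_FX00_MGMT_CMD 0x12   /* from qla_bsg.h */
static int fx00_mgmt(const char *bsg_dev, const void *iocb_rqst,
		     size_t iocb_len, void *resp, size_t resp_len)
{
	/* vendor_cmd[0] = subcommand, vendor_cmd[1..] = qla_mt_iocb_rqst_fx00;
	 * 64 bytes of headroom for the payload is an assumption for this sketch. */
	uint8_t req_buf[sizeof(struct fc_bsg_request) + 64] = { 0 };
	struct fc_bsg_request *bsg_req = (struct fc_bsg_request *)req_buf;
	struct fc_bsg_reply bsg_rep;
	struct sg_io_v4 io;
	int fd, rc;
	bsg_req->msgcode = FC_BSG_HST_VENDOR;
	bsg_req->rqst_data.h_vendor.vendor_id = 0x1077;   /* QLogic PCI vendor ID */
	bsg_req->rqst_data.h_vendor.vendor_cmd[0] = QL_VND_FX00_MGMT_CMD;
	memcpy(&bsg_req->rqst_data.h_vendor.vendor_cmd[1], iocb_rqst, iocb_len);
	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)bsg_req;
	io.request_len = sizeof(struct fc_bsg_request) + iocb_len;
	io.response = (uintptr_t)&bsg_rep;
	io.max_response_len = sizeof(bsg_rep);
	/* Only the reply payload is wired here; dout_xferp/dout_xfer_len would be
	 * set as well when the IOCB request carries SRB_FXDISC_REQ_DMA_VALID. */
	io.din_xferp = (uintptr_t)resp;
	io.din_xfer_len = resp_len;
	io.timeout = 30000;                      /* milliseconds */
	fd = open(bsg_dev, O_RDWR);
	if (fd < 0)
		return -1;
	rc = ioctl(fd, SG_IO, &io);
	close(fd);
	return rc;
}
A call such as fx00_mgmt("/dev/bsg/fc_host0", &rqst, sizeof(rqst), buf, sizeof(buf)) would then be routed by qla2x00_process_vendor_specific() to qlafx00_mgmt_cmd() above.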
static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
......@@ -1928,6 +2067,8 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
case QL_VND_DIAG_IO_CMD:
return qla24xx_process_bidir_cmd(bsg_job);
case QL_VND_FX00_MGMT_CMD:
return qlafx00_mgmt_cmd(bsg_job);
default:
return -ENOSYS;
}
......@@ -2007,7 +2148,8 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
sp = req->outstanding_cmds[cnt];
if (sp) {
if (((sp->type == SRB_CT_CMD) ||
(sp->type == SRB_ELS_CMD_HST))
(sp->type == SRB_ELS_CMD_HST) ||
(sp->type == SRB_FXIOCB_BCMD))
&& (sp->u.bsg_job == bsg_job)) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
......
......@@ -22,6 +22,7 @@
#define QL_VND_DIAG_IO_CMD 0x0A
#define QL_VND_WRITE_I2C 0x10
#define QL_VND_READ_I2C 0x11
#define QL_VND_FX00_MGMT_CMD 0x12
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
......
......@@ -11,28 +11,31 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x0126 | 0x4b,0xba,0xfa |
* | Mailbox commands | 0x115b | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
* | Module Init and Probe | 0x014f | 0x4b,0xba,0xfa |
* | Mailbox commands | 0x1179 | 0x111a-0x111b |
* | | | 0x1155-0x1158 |
* | Device Discovery | 0x2087 | 0x2020-0x2022, |
* | Device Discovery | 0x2095 | 0x2020-0x2022, |
* | | | 0x2016 |
* | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b |
* | Queue Command and IO tracing | 0x3058 | 0x3006-0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x302d-0x302e |
* | DPC Thread | 0x401d | 0x4002,0x4013 |
* | Async Events | 0x5071 | 0x502b-0x502f |
* | | | 0x303d-0x3041 |
* | | | 0x302d,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4022 | 0x4002,0x4013 |
* | Async Events | 0x5081 | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
* | | | 0x5040,0x5075 |
* | Timer Routines | 0x6011 | |
* | User Space Interactions | 0x70c4 | 0x7018,0x702e, |
* | User Space Interactions | 0x70dd | 0x7018,0x702e, |
* | | | 0x7020,0x7024, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
* | | | 0x708c, |
* | | | 0x707b,0x708c, |
* | | | 0x70a5,0x70a6, |
* | | | 0x70a8,0x70ab, |
* | | | 0x70ad-0x70ae |
* | | | 0x70ad-0x70ae, |
* | | | 0x70d1-0x70da |
* | Task Management | 0x803c | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x9011 | |
......
......@@ -245,7 +245,6 @@
#define MAX_CMDSZ 16 /* SCSI maximum CDB size. */
#include "qla_fw.h"
/*
* Timeout timer counts in seconds
*/
......@@ -265,6 +264,7 @@
#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
struct req_que;
......@@ -284,6 +284,7 @@ struct sd_dif_tuple {
struct srb_cmd {
struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
uint32_t request_sense_length;
uint32_t fw_sense_length;
uint8_t *request_sense_ptr;
void *ctx;
};
......@@ -321,7 +322,39 @@ struct srb_iocb {
uint32_t flags;
uint32_t lun;
uint32_t data;
struct completion comp;
uint32_t comp_status;
} tmf;
struct {
#define SRB_FXDISC_REQ_DMA_VALID BIT_0
#define SRB_FXDISC_RESP_DMA_VALID BIT_1
#define SRB_FXDISC_REQ_DWRD_VALID BIT_2
#define SRB_FXDISC_RSP_DWRD_VALID BIT_3
#define FXDISC_TIMEOUT 20
uint8_t flags;
uint32_t req_len;
uint32_t rsp_len;
void *req_addr;
void *rsp_addr;
dma_addr_t req_dma_handle;
dma_addr_t rsp_dma_handle;
uint32_t adapter_id;
uint32_t adapter_id_hi;
uint32_t req_func_type;
uint32_t req_data;
uint32_t req_data_extra;
uint32_t result;
uint32_t seq_number;
uint32_t fw_flags;
struct completion fxiocb_comp;
uint32_t reserved_0;
uint8_t reserved_1;
} fxiocb;
struct {
uint32_t cmd_hndl;
uint32_t comp_status;
struct completion comp;
} abt;
} u;
struct timer_list timer;
......@@ -338,6 +371,10 @@ struct srb_iocb {
#define SRB_TM_CMD 7
#define SRB_SCSI_CMD 8
#define SRB_BIDI_CMD 9
#define SRB_FXIOCB_DCMD 10
#define SRB_FXIOCB_BCMD 11
#define SRB_ABT_CMD 12
typedef struct srb {
atomic_t ref_count;
......@@ -368,6 +405,10 @@ typedef struct srb {
(sp->u.scmd.request_sense_ptr)
#define SET_CMD_SENSE_PTR(sp, ptr) \
(sp->u.scmd.request_sense_ptr = ptr)
#define GET_FW_SENSE_LEN(sp) \
(sp->u.scmd.fw_sense_length)
#define SET_FW_SENSE_LEN(sp, len) \
(sp->u.scmd.fw_sense_length = len)
struct msg_echo_lb {
dma_addr_t send_dma;
......@@ -542,11 +583,74 @@ struct device_reg_25xxmq {
uint32_t atio_q_out;
};
struct device_reg_fx00 {
uint32_t mailbox0; /* 00 */
uint32_t mailbox1; /* 04 */
uint32_t mailbox2; /* 08 */
uint32_t mailbox3; /* 0C */
uint32_t mailbox4; /* 10 */
uint32_t mailbox5; /* 14 */
uint32_t mailbox6; /* 18 */
uint32_t mailbox7; /* 1C */
uint32_t mailbox8; /* 20 */
uint32_t mailbox9; /* 24 */
uint32_t mailbox10; /* 28 */
uint32_t mailbox11;
uint32_t mailbox12;
uint32_t mailbox13;
uint32_t mailbox14;
uint32_t mailbox15;
uint32_t mailbox16;
uint32_t mailbox17;
uint32_t mailbox18;
uint32_t mailbox19;
uint32_t mailbox20;
uint32_t mailbox21;
uint32_t mailbox22;
uint32_t mailbox23;
uint32_t mailbox24;
uint32_t mailbox25;
uint32_t mailbox26;
uint32_t mailbox27;
uint32_t mailbox28;
uint32_t mailbox29;
uint32_t mailbox30;
uint32_t mailbox31;
uint32_t aenmailbox0;
uint32_t aenmailbox1;
uint32_t aenmailbox2;
uint32_t aenmailbox3;
uint32_t aenmailbox4;
uint32_t aenmailbox5;
uint32_t aenmailbox6;
uint32_t aenmailbox7;
/* Request Queue. */
uint32_t req_q_in; /* A0 - Request Queue In-Pointer */
uint32_t req_q_out; /* A4 - Request Queue Out-Pointer */
/* Response Queue. */
uint32_t rsp_q_in; /* A8 - Response Queue In-Pointer */
uint32_t rsp_q_out; /* AC - Response Queue Out-Pointer */
/* Init values shadowed on FW Up Event */
uint32_t initval0; /* B0 */
uint32_t initval1; /* B4 */
uint32_t initval2; /* B8 */
uint32_t initval3; /* BC */
uint32_t initval4; /* C0 */
uint32_t initval5; /* C4 */
uint32_t initval6; /* C8 */
uint32_t initval7; /* CC */
uint32_t fwheartbeat; /* D0 */
};
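As a sanity check on the layout above, here is a small compile-time sketch (not part of the patch) confirming that the queue pointers and shadowed init values land at the offsets noted in the comments, given 32 mailbox and 8 AEN mailbox registers of 4 bytes each:
/* Illustrative only: offsets follow from 32 x 4-byte mailboxes (0x00-0x7C)
 * and 8 x 4-byte AEN mailboxes (0x80-0x9C) preceding the queue pointers. */
#include <stddef.h>
_Static_assert(offsetof(struct device_reg_fx00, aenmailbox0) == 0x80, "AEN mailbox base");
_Static_assert(offsetof(struct device_reg_fx00, req_q_in) == 0xA0, "request queue in-pointer");
_Static_assert(offsetof(struct device_reg_fx00, rsp_q_out) == 0xAC, "response queue out-pointer");
_Static_assert(offsetof(struct device_reg_fx00, initval0) == 0xB0, "shadowed init values");
_Static_assert(offsetof(struct device_reg_fx00, fwheartbeat) == 0xD0, "firmware heartbeat");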
typedef union {
struct device_reg_2xxx isp;
struct device_reg_24xx isp24;
struct device_reg_25xxmq isp25mq;
struct device_reg_82xx isp82;
struct device_reg_fx00 ispfx00;
} device_reg_t;
#define ISP_REQ_Q_IN(ha, reg) \
......@@ -602,6 +706,20 @@ typedef struct {
#define IOCTL_CMD BIT_2
} mbx_cmd_t;
struct mbx_cmd_32 {
uint32_t out_mb; /* outbound from driver */
uint32_t in_mb; /* Incoming from RISC */
uint32_t mb[MAILBOX_REGISTER_COUNT];
long buf_size;
void *bufp;
uint32_t tov;
uint8_t flags;
#define MBX_DMA_IN BIT_0
#define MBX_DMA_OUT BIT_1
#define IOCTL_CMD BIT_2
};
#define MBX_TOV_SECONDS 30
/*
......@@ -677,6 +795,15 @@ typedef struct {
#define MBA_BYPASS_NOTIFICATION 0x8043 /* Auto bypass notification. */
#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */
#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */
#define MBA_FW_NOT_STARTED 0x8050 /* Firmware not started */
#define MBA_FW_STARTING 0x8051 /* Firmware starting */
#define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */
#define MBA_INIT_REQUIRED 0x8061 /* Initialization required */
#define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */
#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */
#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
Notification */
#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */
/* 83XX FCoE specific */
#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
......@@ -797,6 +924,12 @@ typedef struct {
#define MBC_SEND_LFA_COMMAND 0x7D /* Send Loop Fabric Address */
#define MBC_LUN_RESET 0x7E /* Send LUN reset */
/*
* all the Mt. Rainier mailbox command codes that clash with FC/FCoE ones
* should be defined with MBC_MR_*
*/
#define MBC_MR_DRV_SHUTDOWN 0x6A
/*
* ISP24xx mailbox commands
*/
......@@ -1058,6 +1191,30 @@ typedef struct {
uint8_t reserved_3[26];
} init_cb_t;
struct init_cb_fx {
uint16_t version;
uint16_t reserved_1[13];
uint16_t request_q_outpointer;
uint16_t response_q_inpointer;
uint16_t reserved_2[2];
uint16_t response_q_length;
uint16_t request_q_length;
uint16_t reserved_3[2];
uint32_t request_q_address[2];
uint32_t response_q_address[2];
uint16_t reserved_4[4];
uint8_t response_q_msivec;
uint8_t reserved_5[19];
uint16_t interrupt_delay_timer;
uint16_t reserved_6;
uint32_t fwoptions1;
uint32_t fwoptions2;
uint32_t fwoptions3;
uint8_t reserved_7[24];
};
/*
* Get Link Status mailbox command return buffer.
*/
......@@ -1831,6 +1988,9 @@ typedef struct fc_port {
uint16_t loop_id;
uint16_t old_loop_id;
uint16_t tgt_id;
uint16_t old_tgt_id;
uint8_t fcp_prio;
uint8_t fabric_port_name[WWN_SIZE];
......@@ -1848,8 +2008,15 @@ typedef struct fc_port {
uint8_t fc4_type;
uint8_t scan_state;
unsigned long last_queue_full;
unsigned long last_ramp_up;
uint16_t port_id;
} fc_port_t;
#include "qla_mr.h"
/*
* Fibre channel port/lun states.
*/
......@@ -2391,6 +2558,7 @@ struct isp_operations {
int (*start_scsi) (srb_t *);
int (*abort_isp) (struct scsi_qla_host *);
int (*iospace_config)(struct qla_hw_data*);
int (*initialize_adapter)(struct scsi_qla_host *);
};
/* MSI-X Support *************************************************************/
......@@ -2429,6 +2597,7 @@ enum qla_work_type {
QLA_EVT_ASYNC_ADISC,
QLA_EVT_ASYNC_ADISC_DONE,
QLA_EVT_UEVENT,
QLA_EVT_AENFX,
};
......@@ -2456,7 +2625,15 @@ struct qla_work_evt {
u32 code;
#define QLA_UEVENT_CODE_FW_DUMP 0
} uevent;
} u;
struct {
uint32_t evtcode;
uint32_t mbx[8];
uint32_t count;
} aenfx;
struct {
srb_t *sp;
} iosb;
} u;
};
struct qla_chip_state_84xx {
......@@ -2520,6 +2697,11 @@ struct rsp_que {
struct req_que *req;
srb_t *status_srb; /* status continuation entry */
struct work_struct q_work;
dma_addr_t dma_fx00;
response_t *ring_fx00;
uint16_t length_fx00;
uint8_t rsp_pkt[REQUEST_ENTRY_SIZE];
};
/* Request queue data structure */
......@@ -2544,6 +2726,11 @@ struct req_que {
uint16_t num_outstanding_cmds;
#define MAX_Q_DEPTH 32
int max_q_depth;
dma_addr_t dma_fx00;
request_t *ring_fx00;
uint16_t length_fx00;
uint8_t req_pkt[REQUEST_ENTRY_SIZE];
};
/* Place holder for FW buffer parameters */
......@@ -2633,7 +2820,10 @@ struct qla_hw_data {
uint32_t isp82xx_no_md_cap:1;
uint32_t host_shutting_down:1;
uint32_t idc_compl_status:1;
/* 32 bits */
uint32_t mr_reset_hdlr_active:1;
uint32_t mr_intr_valid:1;
/* 34 bits */
} flags;
/* This spinlock is used to protect "io transactions", you must
......@@ -2650,7 +2840,21 @@ struct qla_hw_data {
resource_size_t pio_address;
#define MIN_IOBASE_LEN 0x100
/* Multi queue data structs */
dma_addr_t bar0_hdl;
void __iomem *cregbase;
dma_addr_t bar2_hdl;
#define BAR0_LEN_FX00 (1024 * 1024)
#define BAR2_LEN_FX00 (128 * 1024)
uint32_t rqstq_intr_code;
uint32_t mbx_intr_code;
uint32_t req_que_len;
uint32_t rsp_que_len;
uint32_t req_que_off;
uint32_t rsp_que_off;
/* Multi queue data structs */
device_reg_t __iomem *mqiobase;
device_reg_t __iomem *msixbase;
uint16_t msix_count;
......@@ -2729,7 +2933,8 @@ struct qla_hw_data {
#define DT_ISP8021 BIT_14
#define DT_ISP2031 BIT_15
#define DT_ISP8031 BIT_16
#define DT_ISP_LAST (DT_ISP8031 << 1)
#define DT_ISPFX00 BIT_17
#define DT_ISP_LAST (DT_ISPFX00 << 1)
#define DT_T10_PI BIT_25
#define DT_IIDMA BIT_26
......@@ -2757,6 +2962,7 @@ struct qla_hw_data {
#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
......@@ -2821,6 +3027,7 @@ struct qla_hw_data {
uint16_t r_a_tov;
int port_down_retry_count;
uint8_t mbx_count;
uint8_t aen_mbx_count;
uint32_t login_retry_count;
/* SNS command interfaces. */
......@@ -2868,9 +3075,13 @@ struct qla_hw_data {
void *swl;
/* These are used by mailbox operations. */
volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
uint32_t mailbox_out32[MAILBOX_REGISTER_COUNT];
uint32_t aenmb[AEN_MAILBOX_REGISTER_COUNT_FX00];
mbx_cmd_t *mcp;
struct mbx_cmd_32 *mcp32;
unsigned long mbx_cmd_flags;
#define MBX_INTERRUPT 1
#define MBX_INTR_WAIT 2
......@@ -3014,6 +3225,7 @@ struct qla_hw_data {
int cur_vport_count;
struct qla_chip_state_84xx *cs84xx;
struct qla_statistics qla_stats;
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
struct qlfc_fw fw_buf;
......@@ -3080,6 +3292,8 @@ struct qla_hw_data {
unsigned long host_last_rampup_time;
int cfg_lun_q_depth;
struct mr_data_fx00 mr;
struct qlt_hw_data tgt;
uint16_t thermal_support;
#define THERMAL_SUPPORT_I2C BIT_0
......@@ -3109,6 +3323,8 @@ typedef struct scsi_qla_host {
uint32_t process_response_queue :1;
uint32_t difdix_supported:1;
uint32_t delete_progress:1;
uint32_t fw_tgt_reported:1;
} flags;
atomic_t loop_state;
......@@ -3144,6 +3360,9 @@ typedef struct scsi_qla_host {
#define SCR_PENDING 21 /* SCR in target mode */
#define HOST_RAMP_DOWN_QUEUE_DEPTH 22
#define HOST_RAMP_UP_QUEUE_DEPTH 23
#define PORT_UPDATE_NEEDED 24
#define FX00_RESET_RECOVERY 25
#define FX00_TARGET_SCAN 26
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
......@@ -3234,6 +3453,10 @@ struct qla_tgt_vp_map {
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
atomic_read(&ha->loop_state) == LOOP_DOWN)
#define STATE_TRANSITION(ha) \
(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
atomic_inc(&__vha->vref_count); \
mb(); \
......
......@@ -86,6 +86,7 @@ extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
extern int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
extern int qla2x00_init_rings(scsi_qla_host_t *);
/*
* Global Data in qla_os.c source file.
......@@ -134,7 +135,6 @@ extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
fc_port_t *, uint16_t *);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
......@@ -158,6 +158,7 @@ extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha);
extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
/*
* Global Functions in qla_mid.c source file.
......@@ -211,8 +212,6 @@ extern int qla24xx_start_scsi(srb_t *sp);
int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
uint16_t, uint16_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
extern int qla24xx_dif_start_scsi(srb_t *);
extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
......@@ -424,6 +423,12 @@ extern void qla2x00_free_irqs(scsi_qla_host_t *);
extern int qla2x00_get_data_rate(scsi_qla_host_t *);
extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t);
extern srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
void *);
extern void
qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
uint32_t);
/*
* Global Function Prototypes in qla_sup.c source file.
......@@ -561,6 +566,42 @@ extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
/* qlafx00 related functions */
extern int qlafx00_pci_config(struct scsi_qla_host *);
extern int qlafx00_initialize_adapter(struct scsi_qla_host *);
extern void qlafx00_soft_reset(scsi_qla_host_t *);
extern int qlafx00_chip_diag(scsi_qla_host_t *);
extern void qlafx00_config_rings(struct scsi_qla_host *);
extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);
extern irqreturn_t qlafx00_intr_handler(int, void *);
extern void qlafx00_enable_intrs(struct qla_hw_data *);
extern void qlafx00_disable_intrs(struct qla_hw_data *);
extern int qlafx00_abort_command(srb_t *);
extern int qlafx00_abort_target(fc_port_t *, unsigned int, int);
extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int);
extern int qlafx00_start_scsi(srb_t *);
extern int qlafx00_abort_isp(scsi_qla_host_t *);
extern int qlafx00_iospace_config(struct qla_hw_data *);
extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t);
extern int qlafx00_fw_ready(scsi_qla_host_t *);
extern int qlafx00_configure_devices(scsi_qla_host_t *);
extern int qlafx00_reset_initialize(scsi_qla_host_t *);
extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint8_t);
extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
extern int qlafx00_post_aenfx_work(struct scsi_qla_host *, uint32_t,
uint32_t *, int);
extern uint32_t qlafx00_fw_state_show(struct device *,
struct device_attribute *, char *);
extern void qlafx00_get_host_speed(struct Scsi_Host *);
extern void qlafx00_init_response_q_entries(struct rsp_que *);
extern void qlafx00_tm_iocb(srb_t *, struct tsk_mgmt_entry_fx00 *);
extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
extern void qlafx00_timer_routine(scsi_qla_host_t *);
extern int qlafx00_rescan_isp(scsi_qla_host_t *);
/* qla82xx related functions */
/* PCI related functions */
......
......@@ -639,9 +639,14 @@ void
qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn)
{
struct qla_hw_data *ha = vha->hw;
sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s",ha->model_number,
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version, qla2x00_version_str);
if (IS_QLAFX00(ha))
sprintf(snn, "%s FW:v%s DVR:v%s", ha->model_number,
ha->mr.fw_version, qla2x00_version_str);
else
sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version, qla2x00_version_str);
}
/**
......@@ -923,7 +928,7 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
sns_cmd->p.gpn_data[9] != 0x02) {
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
"GPN_ID failed, rejected request, gpn_rsp:\n");
ql_dump_buffer(ql_dbg_disc, vha, 0x207f,
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
sns_cmd->p.gpn_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
......@@ -1718,7 +1723,8 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
int rval;
struct qla_hw_data *ha = vha->hw;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
IS_QLAFX00(ha))
return QLA_FUNCTION_FAILED;
rval = qla2x00_mgmt_svr_login(vha);
......
......@@ -25,7 +25,6 @@
*/
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
static int qla2x00_init_rings(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
......@@ -83,7 +82,9 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
/* Firmware should use switch negotiated r_a_tov for timeout. */
tmo = ha->r_a_tov / 10 * 2;
if (!IS_FWI2_CAPABLE(ha)) {
if (IS_QLAFX00(ha)) {
tmo = FX00_DEF_RATOV * 2;
} else if (!IS_FWI2_CAPABLE(ha)) {
/*
* Except for earlier ISPs where the timeout is seeded from the
* initialization control block.
......@@ -1977,7 +1978,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
*
* Returns 0 on success.
*/
static int
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
int rval;
......@@ -2012,7 +2013,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
if (!rsp)
continue;
/* Initialize response queue entries */
qla2x00_init_response_q_entries(rsp);
if (IS_QLAFX00(ha))
qlafx00_init_response_q_entries(rsp);
else
qla2x00_init_response_q_entries(rsp);
}
ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
......@@ -2024,11 +2028,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
if (IS_QLAFX00(ha)) {
rval = qlafx00_init_firmware(vha, ha->init_cb_size);
goto next_check;
}
/* Update any ISP specific firmware options before initialization. */
ha->isp_ops->update_fw_options(vha);
ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
if (ha->flags.npiv_supported) {
if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
......@@ -2042,6 +2051,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
}
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
if (rval) {
ql_log(ql_log_fatal, vha, 0x00d2,
"Init Firmware **** FAILED ****.\n");
......@@ -2069,6 +2079,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
uint16_t state[5];
struct qla_hw_data *ha = vha->hw;
if (IS_QLAFX00(vha->hw))
return qlafx00_fw_ready(vha);
rval = QLA_SUCCESS;
/* 20 seconds for loop down. */
......@@ -3134,6 +3147,12 @@ void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
fcport->vha = vha;
if (IS_QLAFX00(vha->hw)) {
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_reg_remote_port(vha, fcport);
return;
}
fcport->login_retry = 0;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
......@@ -3894,15 +3913,24 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
/* Wait at most MAX_TARGET RSCNs for a stable link. */
wait_time = 256;
do {
/* Issue a marker after FW becomes ready. */
qla2x00_marker(vha, req, rsp, 0, 0,
MK_SYNC_ALL);
vha->marker_needed = 0;
if (!IS_QLAFX00(vha->hw)) {
/*
* Issue a marker after FW becomes
* ready.
*/
qla2x00_marker(vha, req, rsp, 0, 0,
MK_SYNC_ALL);
vha->marker_needed = 0;
}
/* Remap devices on Loop. */
clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
qla2x00_configure_loop(vha);
if (IS_QLAFX00(vha->hw))
qlafx00_configure_devices(vha);
else
qla2x00_configure_loop(vha);
wait_time--;
} while (!atomic_read(&vha->loop_down_timer) &&
!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
......@@ -3968,9 +3996,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
if (fcport->drport &&
atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
spin_unlock_irqrestore(&ha->vport_slock, flags);
qla2x00_rport_del(fcport);
spin_lock_irqsave(&ha->vport_slock, flags);
}
}
......
......@@ -5,6 +5,28 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
/**
* qla24xx_calc_iocbs() - Determine number of Command Type 3 and
* Continuation Type 1 IOCBs to allocate.
*
* @dsds: number of data segment descriptors needed
*
* Returns the number of IOCB entries needed to store @dsds.
*/
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
uint16_t iocbs;
iocbs = 1;
if (dsds > 1) {
iocbs += (dsds - 1) / 5;
if ((dsds - 1) % 5)
iocbs++;
}
return iocbs;
}
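A quick worked example of the count computed above (the command IOCB carries one data segment descriptor and each Continuation Type 1 IOCB carries five more):
/* dsds = 1  -> 1 IOCB (command only)
 * dsds = 6  -> 1 + (5 / 5)      = 2 IOCBs
 * dsds = 7  -> 1 + (6 / 5) + 1  = 3 IOCBs (6 % 5 != 0 adds one more)
 * dsds = 12 -> 1 + (11 / 5) + 1 = 4 IOCBs
 */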
/*
* qla2x00_debounce_register
* Debounce register.
......@@ -57,6 +79,17 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
return fcp;
}
static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
uint32_t *isrc = (uint32_t *) src;
uint32_t *odest = (uint32_t *) dst;
uint32_t iter = bsize >> 2;
for (; iter ; iter--)
*odest++ = cpu_to_le32(*isrc++);
}
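For clarity, a minimal user-space analogue of host_to_adap() follows (a sketch, assuming bsize is a multiple of four as the kernel helper expects): each 32-bit word is copied to the adapter-bound buffer in little-endian byte order.
#include <endian.h>
#include <stdint.h>
#include <string.h>
static void host_to_adap_example(const uint8_t *src, uint8_t *dst, uint32_t bsize)
{
	uint32_t word;
	uint32_t off;
	for (off = 0; off + 4 <= bsize; off += 4) {
		memcpy(&word, src + off, sizeof(word));   /* avoid unaligned access */
		word = htole32(word);                     /* adapter expects little-endian */
		memcpy(dst + off, &word, sizeof(word));
	}
}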
static inline void
qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
{
......@@ -213,12 +246,18 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
add_timer(&sp->u.iocb_cmd.timer);
sp->free = qla2x00_sp_free;
if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
(sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
}
static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
return sizeof(struct gid_list_info) * ha->max_fibre_devices;
if (IS_QLAFX00(ha))
return sizeof(uint32_t) * 32;
else
return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}
static inline void
......
......@@ -135,7 +135,8 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
/* Load packet defaults. */
*((uint32_t *)(&cont_pkt->entry_type)) =
*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
__constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
__constant_cpu_to_le32(CONTINUE_A64_TYPE);
return (cont_pkt);
......@@ -486,6 +487,10 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
if (ha->mqenable || IS_QLA83XX(ha)) {
WRT_REG_DWORD(req->req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
} else if (IS_QLAFX00(ha)) {
WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
} else if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
......@@ -514,11 +519,12 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
uint16_t lun, uint8_t type)
{
mrk_entry_t *mrk;
struct mrk_entry_24xx *mrk24;
struct mrk_entry_24xx *mrk24 = NULL;
struct mrk_entry_fx00 *mrkfx = NULL;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
mrk24 = NULL;
req = ha->req_q_map[0];
mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
if (mrk == NULL) {
......@@ -531,7 +537,15 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
mrk->entry_type = MARKER_TYPE;
mrk->modifier = type;
if (type != MK_SYNC_ALL) {
if (IS_FWI2_CAPABLE(ha)) {
if (IS_QLAFX00(ha)) {
mrkfx = (struct mrk_entry_fx00 *) mrk;
mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
mrkfx->handle_hi = 0;
mrkfx->tgt_id = cpu_to_le16(loop_id);
mrkfx->lun[1] = LSB(lun);
mrkfx->lun[2] = MSB(lun);
host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
} else if (IS_FWI2_CAPABLE(ha)) {
mrk24 = (struct mrk_entry_24xx *) mrk;
mrk24->nport_handle = cpu_to_le16(loop_id);
mrk24->lun[1] = LSB(lun);
......@@ -589,28 +603,6 @@ int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
return QLA_SUCCESS;
}
/**
* qla24xx_calc_iocbs() - Determine number of Command Type 3 and
* Continuation Type 1 IOCBs to allocate.
*
* @dsds: number of data segment descriptors needed
*
* Returns the number of IOCB entries needed to store @dsds.
*/
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
uint16_t iocbs;
iocbs = 1;
if (dsds > 1) {
iocbs += (dsds - 1) / 5;
if ((dsds - 1) % 5)
iocbs++;
}
return iocbs;
}
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
uint16_t tot_dsds)
......@@ -1583,7 +1575,6 @@ qla24xx_start_scsi(srb_t *sp)
return QLA_FUNCTION_FAILED;
}
/**
* qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
* @sp: command to send to the ISP
......@@ -1852,6 +1843,8 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
else if (IS_FWI2_CAPABLE(ha))
cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
else if (IS_QLAFX00(ha))
cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
else
cnt = qla2x00_debounce_register(
ISP_REQ_Q_OUT(ha, &reg->isp));
......@@ -1869,8 +1862,13 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
req->cnt -= req_cnt;
pkt = req->ring_ptr;
memset(pkt, 0, REQUEST_ENTRY_SIZE);
pkt->entry_count = req_cnt;
pkt->handle = handle;
if (IS_QLAFX00(ha)) {
WRT_REG_BYTE(&pkt->entry_count, req_cnt);
WRT_REG_WORD(&pkt->handle, handle);
} else {
pkt->entry_count = req_cnt;
pkt->handle = handle;
}
queuing_error:
return pkt;
......@@ -2625,7 +2623,16 @@ qla2x00_start_sp(srb_t *sp)
qla2x00_adisc_iocb(sp, pkt);
break;
case SRB_TM_CMD:
qla24xx_tm_iocb(sp, pkt);
IS_QLAFX00(ha) ?
qlafx00_tm_iocb(sp, pkt) :
qla24xx_tm_iocb(sp, pkt);
break;
case SRB_FXIOCB_DCMD:
case SRB_FXIOCB_BCMD:
qlafx00_fxdisc_iocb(sp, pkt);
break;
case SRB_ABT_CMD:
qlafx00_abort_iocb(sp, pkt);
break;
default:
break;
......
......@@ -16,8 +16,6 @@
#include "qla_target.h"
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
......@@ -1065,9 +1063,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
* @ha: SCSI driver HA context
* @index: SRB index
*/
static void
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
struct req_que *req, uint32_t index)
struct req_que *req, uint32_t index)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
......@@ -1101,7 +1099,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
}
}
static srb_t *
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
struct req_que *req, void *iocb)
{
......@@ -1994,7 +1992,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
lscsi_status = scsi_status & STATUS_MASK;
fcport = sp->fcport;
......@@ -2939,7 +2937,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
goto skip_msi;
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
......@@ -2972,7 +2970,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
skip_msix:
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
!IS_QLA8001(ha) && !IS_QLA82XX(ha))
!IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
goto skip_msi;
ret = pci_enable_msi(ha->pdev);
......@@ -2998,9 +2996,11 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
"Failed to reserve interrupt %d already in use.\n",
ha->pdev->irq);
goto fail;
} else if (!ha->flags.msi_enabled)
} else if (!ha->flags.msi_enabled) {
ql_dbg(ql_dbg_init, vha, 0x0125,
"INTa mode: Enabled.\n");
ha->flags.mr_intr_valid = 1;
}
clear_risc_ints:
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.