Commit afaf5a2d authored by David Somayajulu, committed by James Bottomley

[SCSI] Initial Commit of qla4xxx

open-iSCSI driver for Qlogic Corporation's iSCSI HBAs
Signed-off-by: Ravi Anand <ravi.anand@qlogic.com>
Signed-off-by: David Somayajulu <david.somayajulu@qlogic.com>
Signed-off-by: Doug Maxey <dwm@bubba.enoyolf.org>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Parent ed542bed
@@ -1244,6 +1244,7 @@ config SCSI_QLOGICPTI
module will be called qlogicpti.
source "drivers/scsi/qla2xxx/Kconfig"
source "drivers/scsi/qla4xxx/Kconfig"
config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
@@ -84,6 +84,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
obj-$(CONFIG_SCSI_SEAGATE) += seagate.o
config SCSI_QLA_ISCSI
tristate "QLogic ISP4XXX host adapter family support"
depends on PCI && SCSI
select SCSI_ISCSI_ATTRS
---help---
This driver supports the QLogic 40xx (ISP4XXX) iSCSI host
adapter family.
qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
ql4_nvram.o ql4_dbg.o
obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include <scsi/scsi_dbg.h>
static void qla4xxx_print_srb_info(struct srb * srb)
{
printk("%s: srb = 0x%p, flags=0x%02x\n", __func__, srb, srb->flags);
printk("%s: cmd = 0x%p, saved_dma_handle = 0x%lx\n",
__func__, srb->cmd, (unsigned long) srb->dma_handle);
printk("%s: fw_ddb_index = %d, lun = %d\n",
__func__, srb->fw_ddb_index, srb->cmd->device->lun);
printk("%s: iocb_tov = %d\n",
__func__, srb->iocb_tov);
printk("%s: cc_stat = 0x%x, r_start = 0x%lx, u_start = 0x%lx\n\n",
__func__, srb->cc_stat, srb->r_start, srb->u_start);
}
void qla4xxx_print_scsi_cmd(struct scsi_cmnd *cmd)
{
printk("SCSI Command = 0x%p, Handle=0x%p\n", cmd, cmd->host_scribble);
printk(" b=%d, t=%02xh, l=%02xh, cmd_len = %02xh\n",
cmd->device->channel, cmd->device->id, cmd->device->lun,
cmd->cmd_len);
scsi_print_command(cmd);
printk(" seg_cnt = %d\n", cmd->use_sg);
printk(" request buffer = 0x%p, request buffer len = 0x%x\n",
cmd->request_buffer, cmd->request_bufflen);
if (cmd->use_sg) {
struct scatterlist *sg;
sg = (struct scatterlist *)cmd->request_buffer;
printk(" SG buffer: \n");
qla4xxx_dump_buffer((caddr_t) sg,
(cmd->use_sg * sizeof(*sg)));
}
printk(" tag = %d, transfersize = 0x%x \n", cmd->tag,
cmd->transfersize);
printk(" Pid = %d, SP = 0x%p\n", (int)cmd->pid, cmd->SCp.ptr);
printk(" underflow size = 0x%x, direction=0x%x\n", cmd->underflow,
cmd->sc_data_direction);
printk(" Current time (jiffies) = 0x%lx, "
"timeout expires = 0x%lx\n", jiffies, cmd->eh_timeout.expires);
qla4xxx_print_srb_info((struct srb *) cmd->SCp.ptr);
}
void __dump_registers(struct scsi_qla_host *ha)
{
uint8_t i;
for (i = 0; i < MBOX_REG_COUNT; i++) {
printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
readw(&ha->reg->mailbox[i]));
}
printk(KERN_INFO "0x%02X flash_address = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, flash_address),
readw(&ha->reg->flash_address));
printk(KERN_INFO "0x%02X flash_data = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, flash_data),
readw(&ha->reg->flash_data));
printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, ctrl_status),
readw(&ha->reg->ctrl_status));
if (is_qla4010(ha)) {
printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
readw(&ha->reg->u1.isp4010.nvram));
}
else if (is_qla4022(ha)) {
printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,
u1.isp4022.intr_mask),
readw(&ha->reg->u1.isp4022.intr_mask));
printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
readw(&ha->reg->u1.isp4022.nvram));
printk(KERN_INFO "0x%02X semaphore = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,
u1.isp4022.semaphore),
readw(&ha->reg->u1.isp4022.semaphore));
}
printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, req_q_in),
readw(&ha->reg->req_q_in));
printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, rsp_q_out),
readw(&ha->reg->rsp_q_out));
if (is_qla4010(ha)) {
printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,
u2.isp4010.ext_hw_conf),
readw(&ha->reg->u2.isp4010.ext_hw_conf));
printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,
u2.isp4010.port_ctrl),
readw(&ha->reg->u2.isp4010.port_ctrl));
printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,
u2.isp4010.port_status),
readw(&ha->reg->u2.isp4010.port_status));
printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,
u2.isp4010.req_q_out),
readw(&ha->reg->u2.isp4010.req_q_out));
printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
readw(&ha->reg->u2.isp4010.gp_out));
printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
readw(&ha->reg->u2.isp4010.gp_in));
printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,
u2.isp4010.port_err_status),
readw(&ha->reg->u2.isp4010.port_err_status));
}
else if (is_qla4022(ha)) {
printk(KERN_INFO "Page 0 Registers:\n");
printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,
u2.isp4022.p0.ext_hw_conf),
readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,
u2.isp4022.p0.port_ctrl),
readw(&ha->reg->u2.isp4022.p0.port_ctrl));
printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,
u2.isp4022.p0.port_status),
readw(&ha->reg->u2.isp4022.p0.port_status));
printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,
u2.isp4022.p0.gp_out),
readw(&ha->reg->u2.isp4022.p0.gp_out));
printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
readw(&ha->reg->u2.isp4022.p0.gp_in));
printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,
u2.isp4022.p0.port_err_status),
readw(&ha->reg->u2.isp4022.p0.port_err_status));
printk(KERN_INFO "Page 1 Registers:\n");
writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
&ha->reg->ctrl_status);
printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
(uint8_t) offsetof(struct isp_reg,
u2.isp4022.p1.req_q_out),
readw(&ha->reg->u2.isp4022.p1.req_q_out));
writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
&ha->reg->ctrl_status);
}
}
void qla4xxx_dump_mbox_registers(struct scsi_qla_host *ha)
{
unsigned long flags = 0;
int i = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
for (i = 1; i < MBOX_REG_COUNT; i++)
printk(KERN_INFO " Mailbox[%d] = %08x\n", i,
readw(&ha->reg->mailbox[i]));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
void qla4xxx_dump_registers(struct scsi_qla_host *ha)
{
unsigned long flags = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
__dump_registers(ha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
void qla4xxx_dump_buffer(void *b, uint32_t size)
{
uint32_t cnt;
uint8_t *c = b;
printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh "
"Fh\n");
printk("------------------------------------------------------------"
"--\n");
for (cnt = 0; cnt < size; cnt++, c++) {
printk(KERN_DEBUG "%02x", *c);
if (!(cnt % 16))
printk(KERN_DEBUG "\n");
else
printk(KERN_DEBUG " ");
}
if (cnt % 16)
printk(KERN_DEBUG "\n");
}
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
/*
* Driver debug definitions.
*/
/* #define QL_DEBUG */ /* DEBUG messages */
/* #define QL_DEBUG_LEVEL_3 */ /* Output function tracing */
/* #define QL_DEBUG_LEVEL_4 */
/* #define QL_DEBUG_LEVEL_5 */
/* #define QL_DEBUG_LEVEL_9 */
#define QL_DEBUG_LEVEL_2 /* Always enable error messages */
#if defined(QL_DEBUG)
#define DEBUG(x) do {x;} while (0);
#else
#define DEBUG(x) do {} while (0);
#endif
#if defined(QL_DEBUG_LEVEL_2)
#define DEBUG2(x) do {if(extended_error_logging == 2) x;} while (0);
#define DEBUG2_3(x) do {x;} while (0);
#else /* */
#define DEBUG2(x) do {} while (0);
#endif /* */
#if defined(QL_DEBUG_LEVEL_3)
#define DEBUG3(x) do {if(extended_error_logging == 3) x;} while (0);
#else /* */
#define DEBUG3(x) do {} while (0);
#if !defined(QL_DEBUG_LEVEL_2)
#define DEBUG2_3(x) do {} while (0);
#endif /* */
#endif /* */
#if defined(QL_DEBUG_LEVEL_4)
#define DEBUG4(x) do {x;} while (0);
#else /* */
#define DEBUG4(x) do {} while (0);
#endif /* */
#if defined(QL_DEBUG_LEVEL_5)
#define DEBUG5(x) do {x;} while (0);
#else /* */
#define DEBUG5(x) do {} while (0);
#endif /* */
#if defined(QL_DEBUG_LEVEL_9)
#define DEBUG9(x) do {x;} while (0);
#else /* */
#define DEBUG9(x) do {} while (0);
#endif /* */
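A usage note (a minimal sketch, mirroring the call sites in the driver sources further below): because QL_DEBUG_LEVEL_2 is defined above, DEBUG2() is always compiled in, but its body only executes when the extended_error_logging module parameter (declared in ql4_glbl.h below) equals 2, so a typical call wraps a complete printk() statement:

	/* Hypothetical call site: printed only when extended_error_logging == 2 */
	DEBUG2(printk(KERN_INFO "scsi%ld: %s: request queue full, deferring\n",
		      ha->host_no, __func__));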
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QL4_DEF_H
#define __QL4_DEF_H
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_iscsi.h>
#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
#endif
#ifndef PCI_DEVICE_ID_QLOGIC_ISP4022
#define PCI_DEVICE_ID_QLOGIC_ISP4022 0x4022
#endif /* */
#define QLA_SUCCESS 0
#define QLA_ERROR 1
/*
* Data bit definitions
*/
#define BIT_0 0x1
#define BIT_1 0x2
#define BIT_2 0x4
#define BIT_3 0x8
#define BIT_4 0x10
#define BIT_5 0x20
#define BIT_6 0x40
#define BIT_7 0x80
#define BIT_8 0x100
#define BIT_9 0x200
#define BIT_10 0x400
#define BIT_11 0x800
#define BIT_12 0x1000
#define BIT_13 0x2000
#define BIT_14 0x4000
#define BIT_15 0x8000
#define BIT_16 0x10000
#define BIT_17 0x20000
#define BIT_18 0x40000
#define BIT_19 0x80000
#define BIT_20 0x100000
#define BIT_21 0x200000
#define BIT_22 0x400000
#define BIT_23 0x800000
#define BIT_24 0x1000000
#define BIT_25 0x2000000
#define BIT_26 0x4000000
#define BIT_27 0x8000000
#define BIT_28 0x10000000
#define BIT_29 0x20000000
#define BIT_30 0x40000000
#define BIT_31 0x80000000
/*
* Host adapter default definitions
***********************************/
#define MAX_HBAS 16
#define MAX_BUSES 1
#define MAX_TARGETS (MAX_PRST_DEV_DB_ENTRIES + MAX_DEV_DB_ENTRIES)
#define MAX_LUNS 0xffff
#define MAX_AEN_ENTRIES 256 /* should be > EXT_DEF_MAX_AEN_QUEUE */
#define MAX_DDB_ENTRIES (MAX_PRST_DEV_DB_ENTRIES + MAX_DEV_DB_ENTRIES)
#define MAX_PDU_ENTRIES 32
#define INVALID_ENTRY 0xFFFF
#define MAX_CMDS_TO_RISC 1024
#define MAX_SRBS MAX_CMDS_TO_RISC
#define MBOX_AEN_REG_COUNT 5
#define MAX_INIT_RETRIES 5
#define IOCB_HIWAT_CUSHION 16
/*
* Buffer sizes
*/
#define REQUEST_QUEUE_DEPTH MAX_CMDS_TO_RISC
#define RESPONSE_QUEUE_DEPTH 64
#define QUEUE_SIZE 64
#define DMA_BUFFER_SIZE 512
/*
* Misc
*/
#define MAC_ADDR_LEN 6 /* in bytes */
#define IP_ADDR_LEN 4 /* in bytes */
#define DRIVER_NAME "qla4xxx"
#define MAX_LINKED_CMDS_PER_LUN 3
#define MAX_REQS_SERVICED_PER_INTR 16
#define ISCSI_IPADDR_SIZE 4 /* IP address size */
#define ISCSI_ALIAS_SIZE 32 /* iSCSI alias name size */
#define ISCSI_NAME_SIZE 255 /* ISCSI Name size -
* usually a string */
#define LSDW(x) ((u32)((u64)(x)))
#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
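A minimal sketch of how these macros are used (the variable names here are illustrative): they split a possibly 64-bit DMA address into the low and high 32-bit words expected by the IOCB data-segment descriptors, and the two successive 16-bit shifts in MSDW avoid an undefined 32-bit shift count on platforms where dma_addr_t is only 32 bits wide.

	dma_addr_t sle_dma = sg_dma_address(sg);	/* 'sg' is a hypothetical mapped scatterlist entry */
	cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));	/* bits 31:0 */
	cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));	/* bits 63:32 */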
/*
* Retry & Timeout Values
*/
#define MBOX_TOV 60
#define SOFT_RESET_TOV 30
#define RESET_INTR_TOV 3
#define SEMAPHORE_TOV 10
#define ADAPTER_INIT_TOV 120
#define ADAPTER_RESET_TOV 180
#define EXTEND_CMD_TOV 60
#define WAIT_CMD_TOV 30
#define EH_WAIT_CMD_TOV 120
#define FIRMWARE_UP_TOV 60
#define RESET_FIRMWARE_TOV 30
#define LOGOUT_TOV 10
#define IOCB_TOV_MARGIN 10
#define RELOGIN_TOV 18
#define ISNS_DEREG_TOV 5
#define MAX_RESET_HA_RETRIES 2
/*
* SCSI Request Block structure (srb) that is placed
* on cmd->SCp location of every I/O [We have 22 bytes available]
*/
struct srb {
struct list_head list; /* (8) */
struct scsi_qla_host *ha; /* HA the SP is queued on */
struct ddb_entry *ddb;
uint16_t flags; /* (1) Status flags. */
#define SRB_DMA_VALID BIT_3 /* DMA Buffer mapped. */
#define SRB_GOT_SENSE BIT_4 /* sense data received. */
uint8_t state; /* (1) Status flags. */
#define SRB_NO_QUEUE_STATE 0 /* Request is in between states */
#define SRB_FREE_STATE 1
#define SRB_ACTIVE_STATE 3
#define SRB_ACTIVE_TIMEOUT_STATE 4
#define SRB_SUSPENDED_STATE 7 /* Request in suspended state */
struct scsi_cmnd *cmd; /* (4) SCSI command block */
dma_addr_t dma_handle; /* (4) for unmap of single transfers */
atomic_t ref_count; /* reference count for this srb */
uint32_t fw_ddb_index;
uint8_t err_id; /* error id */
#define SRB_ERR_PORT 1 /* Request failed because "port down" */
#define SRB_ERR_LOOP 2 /* Request failed because "loop down" */
#define SRB_ERR_DEVICE 3 /* Request failed because "device error" */
#define SRB_ERR_OTHER 4
uint16_t reserved;
uint16_t iocb_tov;
uint16_t iocb_cnt; /* Number of used iocbs */
uint16_t cc_stat;
u_long r_start; /* Time we receive a cmd from OS */
u_long u_start; /* Time when we handed the cmd to F/W */
};
/*
* Device Database (DDB) structure
*/
struct ddb_entry {
struct list_head list; /* ddb list */
struct scsi_qla_host *ha;
struct iscsi_cls_session *sess;
struct iscsi_cls_conn *conn;
atomic_t state; /* DDB State */
unsigned long flags; /* DDB Flags */
unsigned long dev_scan_wait_to_start_relogin;
unsigned long dev_scan_wait_to_complete_relogin;
uint16_t os_target_id; /* Target ID */
uint16_t fw_ddb_index; /* DDB firmware index */
uint8_t reserved[2];
uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
uint32_t CmdSn;
uint16_t target_session_id;
uint16_t connection_id;
uint16_t exe_throttle; /* Max number of cmds outstanding
* simultaneously */
uint16_t task_mgmt_timeout; /* Min time for task mgmt cmds to
* complete */
uint16_t default_relogin_timeout; /* Max time to wait for
* relogin to complete */
uint16_t tcp_source_port_num;
uint32_t default_time2wait; /* Default Min time between
* relogins (+aens) */
atomic_t port_down_timer; /* Device connection timer */
atomic_t retry_relogin_timer; /* Min Time between relogins
* (4000 only) */
atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
atomic_t relogin_retry_count; /* Num of times relogin has been
* retried */
uint16_t port;
uint32_t tpgt;
uint8_t ip_addr[ISCSI_IPADDR_SIZE];
uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */
uint8_t iscsi_alias[0x20];
};
/*
* DDB states.
*/
#define DDB_STATE_DEAD 0 /* We can no longer talk to
* this device */
#define DDB_STATE_ONLINE 1 /* Device ready to accept
* commands */
#define DDB_STATE_MISSING 2 /* Device logged off, trying
* to re-login */
/*
* DDB flags.
*/
#define DF_RELOGIN 0 /* Relogin to device */
#define DF_NO_RELOGIN 1 /* Do not relogin if IOCTL
* logged it out */
#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
#define DF_FO_MASKED 3
/*
* Asynchronous Event Queue structure
*/
struct aen {
uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
};
#include "ql4_fw.h"
#include "ql4_nvram.h"
/*
* Linux Host Adapter structure
*/
struct scsi_qla_host {
/* Linux adapter configuration data */
struct Scsi_Host *host; /* pointer to host data */
uint32_t tot_ddbs;
unsigned long flags;
#define AF_ONLINE 0 /* 0x00000001 */
#define AF_INIT_DONE 1 /* 0x00000002 */
#define AF_MBOX_COMMAND 2 /* 0x00000004 */
#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
#define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
#define AF_LINK_UP 8 /* 0x00000100 */
#define AF_TOPCAT_CHIP_PRESENT 9 /* 0x00000200 */
#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
#define AF_ISNS_CMD_IN_PROCESS 12 /* 0x00001000 */
#define AF_ISNS_CMD_DONE 13 /* 0x00002000 */
unsigned long dpc_flags;
#define DPC_RESET_HA 1 /* 0x00000002 */
#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */
#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */
#define DPC_RESET_HA_DESTROY_DDB_LIST 4 /* 0x00000010 */
#define DPC_RESET_HA_INTR 5 /* 0x00000020 */
#define DPC_ISNS_RESTART 7 /* 0x00000080 */
#define DPC_AEN 9 /* 0x00000200 */
#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
uint16_t iocb_cnt;
uint16_t iocb_hiwat;
/* SRB cache. */
#define SRB_MIN_REQ 128
mempool_t *srb_mempool;
/* pci information */
struct pci_dev *pdev;
struct isp_reg __iomem *reg; /* Base I/O address */
unsigned long pio_address;
unsigned long pio_length;
#define MIN_IOBASE_LEN 0x100
uint16_t req_q_count;
uint8_t marker_needed;
uint8_t rsvd1;
unsigned long host_no;
/* NVRAM registers */
struct eeprom_data *nvram;
spinlock_t hardware_lock ____cacheline_aligned;
spinlock_t list_lock;
uint32_t eeprom_cmd_data;
/* Counters for general statistics */
uint64_t adapter_error_count;
uint64_t device_error_count;
uint64_t total_io_count;
uint64_t total_mbytes_xferred;
uint64_t link_failure_count;
uint64_t invalid_crc_count;
uint32_t spurious_int_count;
uint32_t aborted_io_count;
uint32_t io_timeout_count;
uint32_t mailbox_timeout_count;
uint32_t seconds_since_last_intr;
uint32_t seconds_since_last_heartbeat;
uint32_t mac_index;
/* Info Needed for Management App */
/* --- From GetFwVersion --- */
uint32_t firmware_version[2];
uint32_t patch_number;
uint32_t build_number;
/* --- From Init_FW --- */
/* init_cb_t *init_cb; */
uint16_t firmware_options;
uint16_t tcp_options;
uint8_t ip_address[IP_ADDR_LEN];
uint8_t subnet_mask[IP_ADDR_LEN];
uint8_t gateway[IP_ADDR_LEN];
uint8_t alias[32];
uint8_t name_string[256];
uint8_t heartbeat_interval;
uint8_t rsvd;
/* --- From FlashSysInfo --- */
uint8_t my_mac[MAC_ADDR_LEN];
uint8_t serial_number[16];
/* --- From GetFwState --- */
uint32_t firmware_state;
uint32_t board_id;
uint32_t addl_fw_state;
/* Linux kernel thread */
struct workqueue_struct *dpc_thread;
struct work_struct dpc_work;
/* Linux timer thread */
struct timer_list timer;
uint32_t timer_active;
/* Recovery Timers */
uint32_t port_down_retry_count;
uint32_t discovery_wait;
atomic_t check_relogin_timeouts;
uint32_t retry_reset_ha_cnt;
uint32_t isp_reset_timer; /* reset test timer */
uint32_t nic_reset_timer; /* simulated nic reset test timer */
int eh_start;
struct list_head free_srb_q;
uint16_t free_srb_q_count;
uint16_t num_srbs_allocated;
/* DMA Memory Block */
void *queues;
dma_addr_t queues_dma;
unsigned long queues_len;
#define MEM_ALIGN_VALUE \
((max(REQUEST_QUEUE_DEPTH, RESPONSE_QUEUE_DEPTH)) * \
sizeof(struct queue_entry))
/* request and response queue variables */
dma_addr_t request_dma;
struct queue_entry *request_ring;
struct queue_entry *request_ptr;
dma_addr_t response_dma;
struct queue_entry *response_ring;
struct queue_entry *response_ptr;
dma_addr_t shadow_regs_dma;
struct shadow_regs *shadow_regs;
uint16_t request_in; /* Current indexes. */
uint16_t request_out;
uint16_t response_in;
uint16_t response_out;
/* aen queue variables */
uint16_t aen_q_count; /* Number of available aen_q entries */
uint16_t aen_in; /* Current indexes */
uint16_t aen_out;
struct aen aen_q[MAX_AEN_ENTRIES];
/* This mutex prevents several threads from issuing mailbox
 * commands concurrently.
*/
struct mutex mbox_sem;
wait_queue_head_t mailbox_wait_queue;
/* temporary mailbox status registers */
volatile uint8_t mbox_status_count;
volatile uint32_t mbox_status[MBOX_REG_COUNT];
/* local device database list (contains internal ddb entries) */
struct list_head ddb_list;
/* Map ddb_list entry by FW ddb index */
struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES];
};
static inline int is_qla4010(struct scsi_qla_host *ha)
{
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010;
}
static inline int is_qla4022(struct scsi_qla_host *ha)
{
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022;
}
static inline int adapter_up(struct scsi_qla_host *ha)
{
return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
(test_bit(AF_LINK_UP, &ha->flags) != 0);
}
static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
{
return (struct scsi_qla_host *)shost->hostdata;
}
static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
&ha->reg->u1.isp4022.semaphore :
&ha->reg->u1.isp4010.nvram);
}
static inline void __iomem* isp_nvram(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
&ha->reg->u1.isp4022.nvram :
&ha->reg->u1.isp4010.nvram);
}
static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
&ha->reg->u2.isp4022.p0.ext_hw_conf :
&ha->reg->u2.isp4010.ext_hw_conf);
}
static inline void __iomem* isp_port_status(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
&ha->reg->u2.isp4022.p0.port_status :
&ha->reg->u2.isp4010.port_status);
}
static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
&ha->reg->u2.isp4022.p0.port_ctrl :
&ha->reg->u2.isp4010.port_ctrl);
}
static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
&ha->reg->u2.isp4022.p0.port_err_status :
&ha->reg->u2.isp4010.port_err_status);
}
static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
&ha->reg->u2.isp4022.p0.gp_out :
&ha->reg->u2.isp4010.gp_out);
}
static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha)
{
return (is_qla4022(ha) ?
offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2 :
offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2);
}
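The division by two converts a byte offset within struct eeprom_data into the half-word (16-bit) addresses the serial EEPROM uses (see the note at rd_nvram_word() in ql4_nvram.c below). A hypothetical caller, which must hold hardware_lock per that note, would read the word like this:

	/* hypothetical read of the external hardware configuration word */
	u16 ext_hw_conf = rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha));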
int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask);
int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
static inline int ql4xxx_lock_flash(struct scsi_qla_host *a)
{
if (is_qla4022(a))
return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK,
(QL4022_RESOURCE_BITS_BASE_CODE |
(a->mac_index)) << 13);
else
return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
QL4010_FLASH_SEM_BITS);
}
static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a)
{
if (is_qla4022(a))
ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
else
ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK);
}
static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a)
{
if (is_qla4022(a))
return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK,
(QL4022_RESOURCE_BITS_BASE_CODE |
(a->mac_index)) << 10);
else
return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
QL4010_NVRAM_SEM_BITS);
}
static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a)
{
if (is_qla4022(a))
ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
else
ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK);
}
static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a)
{
if (is_qla4022(a))
return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK,
(QL4022_RESOURCE_BITS_BASE_CODE |
(a->mac_index)) << 1);
else
return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
QL4010_DRVR_SEM_BITS);
}
static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
{
if (is_qla4022(a))
ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
else
ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK);
}
/*---------------------------------------------------------------------------*/
/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
#define PRESERVE_DDB_LIST 0
#define REBUILD_DDB_LIST 1
/* Defines for process_aen() */
#define PROCESS_ALL_AENS 0
#define FLUSH_DDB_CHANGED_AENS 1
#define RELOGIN_DDB_CHANGED_AENS 2
#include "ql4_version.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
#endif /* __QL4_DEF_H */
This diff is collapsed.
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QLA4x_GBL_H
#define __QLA4x_GBL_H
int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
int qla4xxx_initialize_adapter(struct scsi_qla_host * ha,
uint8_t renew_ddb_list);
int qla4xxx_soft_reset(struct scsi_qla_host *ha);
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id, struct pt_regs *regs);
void qla4xxx_free_ddb_list(struct scsi_qla_host * ha);
void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen);
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha);
int qla4xxx_relogin_device(struct scsi_qla_host * ha,
struct ddb_entry * ddb_entry);
int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
int lun);
int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
uint32_t offset, uint32_t len);
int qla4xxx_get_firmware_status(struct scsi_qla_host * ha);
int qla4xxx_get_firmware_state(struct scsi_qla_host * ha);
int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha);
/* FIXME: Goodness! this really wants a small struct to hold the
* parameters. On x86 the args will get passed on the stack! */
int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
uint16_t fw_ddb_index,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma,
uint32_t *num_valid_ddb_entries,
uint32_t *next_ddb_index,
uint32_t *fw_ddb_device_state,
uint32_t *conn_err_detail,
uint16_t *tcp_source_port_num,
uint16_t *connection_id);
struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host * ha,
uint32_t fw_ddb_index);
int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
dma_addr_t fw_ddb_entry_dma);
void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry);
u16 rd_nvram_word(struct scsi_qla_host * ha, int offset);
void qla4xxx_get_crash_record(struct scsi_qla_host * ha);
struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
int qla4xxx_add_sess(struct ddb_entry *);
void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
uint16_t fw_ddb_index,
uint16_t connection_id,
uint16_t option);
int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
uint16_t fw_ddb_index);
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha);
int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
uint32_t intr_status);
int qla4xxx_init_rings(struct scsi_qla_host * ha);
void qla4xxx_dump_buffer(void *b, uint32_t size);
struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index);
void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb);
int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);
int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
uint32_t fw_ddb_index, uint32_t state);
extern int extended_error_logging;
extern int ql4xdiscoverywait;
extern int ql4xdontresethba;
#endif /* __QLA4x_GBL_H */
This diff is collapsed.
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
/*
*
* qla4xxx_lookup_ddb_by_fw_index
* This routine locates a device handle given the firmware device
* database index. If device doesn't exist, returns NULL.
*
* Input:
* ha - Pointer to host adapter structure.
* fw_ddb_index - Firmware's device database index
*
* Returns:
* Pointer to the corresponding internal device database structure
*/
static inline struct ddb_entry *
qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index)
{
struct ddb_entry *ddb_entry = NULL;
if ((fw_ddb_index < MAX_DDB_ENTRIES) &&
(ha->fw_ddb_index_map[fw_ddb_index] !=
(struct ddb_entry *) INVALID_ENTRY)) {
ddb_entry = ha->fw_ddb_index_map[fw_ddb_index];
}
DEBUG3(printk("scsi%d: %s: index [%d], ddb_entry = %p\n",
ha->host_no, __func__, fw_ddb_index, ddb_entry));
return ddb_entry;
}
static inline void
__qla4xxx_enable_intrs(struct scsi_qla_host *ha)
{
if (is_qla4022(ha)) {
writel(set_rmask(IMR_SCSI_INTR_ENABLE),
&ha->reg->u1.isp4022.intr_mask);
readl(&ha->reg->u1.isp4022.intr_mask);
} else {
writel(set_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
}
set_bit(AF_INTERRUPTS_ON, &ha->flags);
}
static inline void
__qla4xxx_disable_intrs(struct scsi_qla_host *ha)
{
if (is_qla4022(ha)) {
writel(clr_rmask(IMR_SCSI_INTR_ENABLE),
&ha->reg->u1.isp4022.intr_mask);
readl(&ha->reg->u1.isp4022.intr_mask);
} else {
writel(clr_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
}
clear_bit(AF_INTERRUPTS_ON, &ha->flags);
}
static inline void
qla4xxx_enable_intrs(struct scsi_qla_host *ha)
{
unsigned long flags;
spin_lock_irqsave(&ha->hardware_lock, flags);
__qla4xxx_enable_intrs(ha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static inline void
qla4xxx_disable_intrs(struct scsi_qla_host *ha)
{
unsigned long flags;
spin_lock_irqsave(&ha->hardware_lock, flags);
__qla4xxx_disable_intrs(ha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include <scsi/scsi_tcq.h>
/**
* qla4xxx_get_req_pkt - returns a valid entry in request queue.
* @ha: Pointer to host adapter structure.
* @queue_entry: Pointer to pointer to queue entry structure
*
* This routine performs the following tasks:
* - returns the current request_in pointer (if queue not full)
* - advances the request_in pointer
* - checks for queue full
**/
int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
struct queue_entry **queue_entry)
{
uint16_t request_in;
uint8_t status = QLA_SUCCESS;
*queue_entry = ha->request_ptr;
/* get the latest request_in and request_out index */
request_in = ha->request_in;
ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
/* Advance request queue pointer and check for queue full */
if (request_in == (REQUEST_QUEUE_DEPTH - 1)) {
request_in = 0;
ha->request_ptr = ha->request_ring;
} else {
request_in++;
ha->request_ptr++;
}
/* request queue is full, try again later */
if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) {
/* restore request pointer */
ha->request_ptr = *queue_entry;
status = QLA_ERROR;
} else {
ha->request_in = request_in;
memset(*queue_entry, 0, sizeof(**queue_entry));
}
return status;
}
/**
* qla4xxx_send_marker_iocb - issues marker iocb to HBA
* @ha: Pointer to host adapter structure.
* @ddb_entry: Pointer to device database entry
* @lun: SCSI LUN
* @marker_type: marker identifier
*
* This routine issues a marker IOCB.
**/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry, int lun)
{
struct marker_entry *marker_entry;
unsigned long flags = 0;
uint8_t status = QLA_SUCCESS;
/* Acquire hardware specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Get pointer to the queue entry for the marker */
if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
QLA_SUCCESS) {
status = QLA_ERROR;
goto exit_send_marker;
}
/* Put the marker in the request queue */
marker_entry->hdr.entryType = ET_MARKER;
marker_entry->hdr.entryCount = 1;
marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
marker_entry->modifier = cpu_to_le16(MM_LUN_RESET);
int_to_scsilun(lun, &marker_entry->lun);
wmb();
/* Tell ISP it's got a new I/O request */
writel(ha->request_in, &ha->reg->req_q_in);
readl(&ha->reg->req_q_in);
exit_send_marker:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return status;
}
struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
struct scsi_qla_host *ha)
{
struct continuation_t1_entry *cont_entry;
cont_entry = (struct continuation_t1_entry *)ha->request_ptr;
/* Advance request queue pointer */
if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
ha->request_in = 0;
ha->request_ptr = ha->request_ring;
} else {
ha->request_in++;
ha->request_ptr++;
}
/* Load packet defaults */
cont_entry->hdr.entryType = ET_CONTINUE;
cont_entry->hdr.entryCount = 1;
cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);
return cont_entry;
}
uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
uint16_t iocbs;
iocbs = 1;
if (dsds > COMMAND_SEG) {
iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
iocbs++;
}
return iocbs;
}
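To illustrate the arithmetic with hypothetical segment limits (COMMAND_SEG and CONTINUE_SEG come from ql4_fw.h, whose diff is collapsed in this view):

	/*
	 * Example, assuming COMMAND_SEG = 4 and CONTINUE_SEG = 5 (illustrative
	 * values only): for dsds = 12, the command IOCB carries 4 segments and
	 * the remaining 8 need continuation IOCBs:
	 *	iocbs = 1 + (12 - 4) / 5 = 2, then +1 for the 3-segment remainder
	 * giving 3 IOCBs total (one command IOCB plus two continuation IOCBs).
	 */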
void qla4xxx_build_scsi_iocbs(struct srb *srb,
struct command_t3_entry *cmd_entry,
uint16_t tot_dsds)
{
struct scsi_qla_host *ha;
uint16_t avail_dsds;
struct data_seg_a64 *cur_dsd;
struct scsi_cmnd *cmd;
cmd = srb->cmd;
ha = srb->ha;
if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
/* No data being transferred */
cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
return;
}
avail_dsds = COMMAND_SEG;
cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);
/* Load data segments */
if (cmd->use_sg) {
struct scatterlist *cur_seg;
struct scatterlist *end_seg;
cur_seg = (struct scatterlist *)cmd->request_buffer;
end_seg = cur_seg + tot_dsds;
while (cur_seg < end_seg) {
dma_addr_t sle_dma;
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
struct continuation_t1_entry *cont_entry;
cont_entry = qla4xxx_alloc_cont_entry(ha);
cur_dsd =
(struct data_seg_a64 *)
&cont_entry->dataseg[0];
avail_dsds = CONTINUE_SEG;
}
sle_dma = sg_dma_address(cur_seg);
cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
avail_dsds--;
cur_dsd++;
cur_seg++;
}
} else {
cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle));
cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle));
cur_dsd->count = cpu_to_le32(cmd->request_bufflen);
}
}
/**
* qla4xxx_send_command_to_isp - issues command to HBA
* @ha: pointer to host adapter structure.
* @srb: pointer to SCSI Request Block to be sent to ISP
*
* This routine is called by qla4xxx_queuecommand to build an ISP
* command and pass it to the ISP for execution.
**/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
{
struct scsi_cmnd *cmd = srb->cmd;
struct ddb_entry *ddb_entry;
struct command_t3_entry *cmd_entry;
struct scatterlist *sg = NULL;
uint16_t tot_dsds;
uint16_t req_cnt;
unsigned long flags;
uint16_t cnt;
uint32_t index;
char tag[2];
/* Get real lun and adapter */
ddb_entry = srb->ddb;
/* Send marker(s) if needed. */
if (ha->marker_needed == 1) {
if (qla4xxx_send_marker_iocb(ha, ddb_entry,
cmd->device->lun) != QLA_SUCCESS)
return QLA_ERROR;
ha->marker_needed = 0;
}
tot_dsds = 0;
/* Acquire hardware specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
index = (uint32_t)cmd->request->tag;
/* Calculate the number of request entries needed. */
if (cmd->use_sg) {
sg = (struct scatterlist *)cmd->request_buffer;
tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
cmd->sc_data_direction);
if (tot_dsds == 0)
goto queuing_error;
} else if (cmd->request_bufflen) {
dma_addr_t req_dma;
req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
cmd->request_bufflen,
cmd->sc_data_direction);
if (dma_mapping_error(req_dma))
goto queuing_error;
srb->dma_handle = req_dma;
tot_dsds = 1;
}
req_cnt = qla4xxx_calc_request_entries(tot_dsds);
if (ha->req_q_count < (req_cnt + 2)) {
cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
if (ha->request_in < cnt)
ha->req_q_count = cnt - ha->request_in;
else
ha->req_q_count = REQUEST_QUEUE_DEPTH -
(ha->request_in - cnt);
}
if (ha->req_q_count < (req_cnt + 2))
goto queuing_error;
/* total iocbs active */
if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
goto queuing_error;
/* Build command packet */
cmd_entry = (struct command_t3_entry *) ha->request_ptr;
memset(cmd_entry, 0, sizeof(struct command_t3_entry));
cmd_entry->hdr.entryType = ET_COMMAND;
cmd_entry->handle = cpu_to_le32(index);
cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id);
int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
cmd_entry->hdr.entryCount = req_cnt;
/* Set data transfer direction control flags
* NOTE: Look at data_direction bits iff there is data to be
* transferred, as the data direction bit is sometimes filled
* in when there is no data to be transferred */
cmd_entry->control_flags = CF_NO_DATA;
if (cmd->request_bufflen) {
if (cmd->sc_data_direction == DMA_TO_DEVICE)
cmd_entry->control_flags = CF_WRITE;
else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
cmd_entry->control_flags = CF_READ;
}
/* Set tagged queueing control flags */
cmd_entry->control_flags |= CF_SIMPLE_TAG;
if (scsi_populate_tag_msg(cmd, tag))
switch (tag[0]) {
case MSG_HEAD_TAG:
cmd_entry->control_flags |= CF_HEAD_TAG;
break;
case MSG_ORDERED_TAG:
cmd_entry->control_flags |= CF_ORDERED_TAG;
break;
}
/* Advance request queue pointer */
ha->request_in++;
if (ha->request_in == REQUEST_QUEUE_DEPTH) {
ha->request_in = 0;
ha->request_ptr = ha->request_ring;
} else
ha->request_ptr++;
qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
wmb();
/*
* Check to see if adapter is online before placing request on
* request queue. If a reset occurs and a request is in the queue,
* the firmware will still attempt to process the request, retrieving
* garbage for pointers.
*/
if (!test_bit(AF_ONLINE, &ha->flags)) {
DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
"Do not issue command.\n",
ha->host_no, __func__));
goto queuing_error;
}
srb->cmd->host_scribble = (unsigned char *)srb;
/* update counters */
srb->state = SRB_ACTIVE_STATE;
srb->flags |= SRB_DMA_VALID;
/* Track IOCB used */
ha->iocb_cnt += req_cnt;
srb->iocb_cnt = req_cnt;
ha->req_q_count -= req_cnt;
/* Debug print statements */
writel(ha->request_in, &ha->reg->req_q_in);
readl(&ha->reg->req_q_in);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
queuing_error:
if (cmd->use_sg && tot_dsds) {
sg = (struct scatterlist *) cmd->request_buffer;
pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
cmd->sc_data_direction);
} else if (tot_dsds)
pci_unmap_single(ha->pdev, srb->dma_handle,
cmd->request_bufflen, cmd->sc_data_direction);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_ERROR;
}
This diff is collapsed.
This diff is collapsed.
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
static inline int eeprom_size(struct scsi_qla_host *ha)
{
return is_qla4022(ha) ? FM93C86A_SIZE_16 : FM93C66A_SIZE_16;
}
static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha)
{
return is_qla4022(ha) ? FM93C86A_NO_ADDR_BITS_16 :
FM93C56A_NO_ADDR_BITS_16;
}
static inline int eeprom_no_data_bits(struct scsi_qla_host *ha)
{
return FM93C56A_DATA_BITS_16;
}
static int fm93c56a_select(struct scsi_qla_host * ha)
{
DEBUG5(printk(KERN_ERR "fm93c56a_select:\n"));
ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000;
writel(ha->eeprom_cmd_data, isp_nvram(ha));
readl(isp_nvram(ha));
return 1;
}
static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
{
int i;
int mask;
int dataBit;
int previousBit;
/* Clock in a zero, then do the start bit. */
writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, isp_nvram(ha));
writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
readl(isp_nvram(ha));
mask = 1 << (FM93C56A_CMD_BITS - 1);
/* Force the previous data bit to be different. */
previousBit = 0xffff;
for (i = 0; i < FM93C56A_CMD_BITS; i++) {
dataBit =
(cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
if (previousBit != dataBit) {
/*
* If the bit changed, then change the DO state to
* match.
*/
writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
previousBit = dataBit;
}
writel(ha->eeprom_cmd_data | dataBit |
AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
writel(ha->eeprom_cmd_data | dataBit |
AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
readl(isp_nvram(ha));
cmd = cmd << 1;
}
mask = 1 << (eeprom_no_addr_bits(ha) - 1);
/* Force the previous data bit to be different. */
previousBit = 0xffff;
for (i = 0; i < eeprom_no_addr_bits(ha); i++) {
dataBit = addr & mask ? AUBURN_EEPROM_DO_1 :
AUBURN_EEPROM_DO_0;
if (previousBit != dataBit) {
/*
* If the bit changed, then change the DO state to
* match.
*/
writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
previousBit = dataBit;
}
writel(ha->eeprom_cmd_data | dataBit |
AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
writel(ha->eeprom_cmd_data | dataBit |
AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
readl(isp_nvram(ha));
addr = addr << 1;
}
return 1;
}
static int fm93c56a_deselect(struct scsi_qla_host * ha)
{
ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000;
writel(ha->eeprom_cmd_data, isp_nvram(ha));
readl(isp_nvram(ha));
return 1;
}
static int fm93c56a_datain(struct scsi_qla_host * ha, unsigned short *value)
{
int i;
int data = 0;
int dataBit;
/* Read the data bits
* The first bit is a dummy. Clock right over it. */
for (i = 0; i < eeprom_no_data_bits(ha); i++) {
writel(ha->eeprom_cmd_data |
AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
writel(ha->eeprom_cmd_data |
AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
dataBit =
(readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
data = (data << 1) | dataBit;
}
*value = data;
return 1;
}
static int eeprom_readword(int eepromAddr, u16 * value,
struct scsi_qla_host * ha)
{
fm93c56a_select(ha);
fm93c56a_cmd(ha, FM93C56A_READ, eepromAddr);
fm93c56a_datain(ha, value);
fm93c56a_deselect(ha);
return 1;
}
/* Hardware_lock must be set before calling */
u16 rd_nvram_word(struct scsi_qla_host * ha, int offset)
{
u16 val;
/* NOTE: NVRAM uses half-word addresses */
eeprom_readword(offset, &val, ha);
return val;
}
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha)
{
int status = QLA_ERROR;
uint16_t checksum = 0;
uint32_t index;
unsigned long flags;
spin_lock_irqsave(&ha->hardware_lock, flags);
for (index = 0; index < eeprom_size(ha); index++)
checksum += rd_nvram_word(ha, index);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (checksum == 0)
status = QLA_SUCCESS;
return status;
}
/*************************************************************************
*
* Hardware Semaphore routines
*
*************************************************************************/
int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
{
uint32_t value;
unsigned long flags;
unsigned int seconds = 30;
DEBUG2(printk("scsi%ld : Trying to get SEM lock - mask= 0x%x, code = "
"0x%x\n", ha->host_no, sem_mask, sem_bits));
do {
spin_lock_irqsave(&ha->hardware_lock, flags);
writel((sem_mask | sem_bits), isp_semaphore(ha));
value = readw(isp_semaphore(ha));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if ((value & (sem_mask >> 16)) == sem_bits) {
DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, "
"code = 0x%x\n", ha->host_no,
sem_mask, sem_bits));
return QLA_SUCCESS;
}
ssleep(1);
} while (--seconds);
return QLA_ERROR;
}
void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask)
{
unsigned long flags;
spin_lock_irqsave(&ha->hardware_lock, flags);
writel(sem_mask, isp_semaphore(ha));
readl(isp_semaphore(ha));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
DEBUG2(printk("scsi%ld : UNLOCK SEM - mask= 0x%x\n", ha->host_no,
sem_mask));
}
int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
{
uint32_t value;
unsigned long flags;
spin_lock_irqsave(&ha->hardware_lock, flags);
writel((sem_mask | sem_bits), isp_semaphore(ha));
value = readw(isp_semaphore(ha));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if ((value & (sem_mask >> 16)) == sem_bits) {
DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, code = "
"0x%x, sema code=0x%x\n", ha->host_no,
sem_mask, sem_bits, value));
return 1;
}
return 0;
}
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef _QL4XNVRM_H_
#define _QL4XNVRM_H_
/*
* FM93C56A/66A/86A serial EEPROM definitions
*/
#define FM93C56A_SIZE_8 0x100
#define FM93C56A_SIZE_16 0x80
#define FM93C66A_SIZE_8 0x200
#define FM93C66A_SIZE_16 0x100 /* 4010 */
#define FM93C86A_SIZE_16 0x400 /* 4022 */
#define FM93C56A_START 0x1
/* Commands */
#define FM93C56A_READ 0x2
#define FM93C56A_WEN 0x0
#define FM93C56A_WRITE 0x1
#define FM93C56A_WRITE_ALL 0x0
#define FM93C56A_WDS 0x0
#define FM93C56A_ERASE 0x3
#define FM93C56A_ERASE_ALL 0x0
/* Command Extensions */
#define FM93C56A_WEN_EXT 0x3
#define FM93C56A_WRITE_ALL_EXT 0x1
#define FM93C56A_WDS_EXT 0x0
#define FM93C56A_ERASE_ALL_EXT 0x2
/* Address Bits */
#define FM93C56A_NO_ADDR_BITS_16 8 /* 4010 */
#define FM93C56A_NO_ADDR_BITS_8 9 /* 4010 */
#define FM93C86A_NO_ADDR_BITS_16 10 /* 4022 */
/* Data Bits */
#define FM93C56A_DATA_BITS_16 16
#define FM93C56A_DATA_BITS_8 8
/* Special Bits */
#define FM93C56A_READ_DUMMY_BITS 1
#define FM93C56A_READY 0
#define FM93C56A_BUSY 1
#define FM93C56A_CMD_BITS 2
/* Auburn Bits */
#define AUBURN_EEPROM_DI 0x8
#define AUBURN_EEPROM_DI_0 0x0
#define AUBURN_EEPROM_DI_1 0x8
#define AUBURN_EEPROM_DO 0x4
#define AUBURN_EEPROM_DO_0 0x0
#define AUBURN_EEPROM_DO_1 0x4
#define AUBURN_EEPROM_CS 0x2
#define AUBURN_EEPROM_CS_0 0x0
#define AUBURN_EEPROM_CS_1 0x2
#define AUBURN_EEPROM_CLK_RISE 0x1
#define AUBURN_EEPROM_CLK_FALL 0x0
/* */
/* EEPROM format */
/* */
struct bios_params {
uint16_t SpinUpDelay:1;
uint16_t BIOSDisable:1;
uint16_t MMAPEnable:1;
uint16_t BootEnable:1;
uint16_t Reserved0:12;
uint8_t bootID0:7;
uint8_t bootID0Valid:1;
uint8_t bootLUN0[8];
uint8_t bootID1:7;
uint8_t bootID1Valid:1;
uint8_t bootLUN1[8];
uint16_t MaxLunsPerTarget;
uint8_t Reserved1[10];
};
struct eeprom_port_cfg {
/* MTU MAC 0 */
u16 etherMtu_mac;
/* Flow Control MAC 0 */
u16 pauseThreshold_mac;
u16 resumeThreshold_mac;
u16 reserved[13];
};
struct eeprom_function_cfg {
u8 reserved[30];
/* MAC ADDR */
u8 macAddress[6];
u8 macAddressSecondary[6];
u16 subsysVendorId;
u16 subsysDeviceId;
};
struct eeprom_data {
union {
struct { /* isp4010 */
u8 asic_id[4]; /* x00 */
u8 version; /* x04 */
u8 reserved; /* x05 */
u16 board_id; /* x06 */
#define EEPROM_BOARDID_ELDORADO 1
#define EEPROM_BOARDID_PLACER 2
#define EEPROM_SERIAL_NUM_SIZE 16
u8 serial_number[EEPROM_SERIAL_NUM_SIZE]; /* x08 */
/* ExtHwConfig: */
/* Offset = 24bytes
*
* | SSRAM Size| |ST|PD|SDRAM SZ| W| B| SP | |
* |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
* +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
*/
u16 ext_hw_conf; /* x18 */
u8 mac0[6]; /* x1A */
u8 mac1[6]; /* x20 */
u8 mac2[6]; /* x26 */
u8 mac3[6]; /* x2C */
u16 etherMtu; /* x32 */
u16 macConfig; /* x34 */
#define MAC_CONFIG_ENABLE_ANEG 0x0001
#define MAC_CONFIG_ENABLE_PAUSE 0x0002
u16 phyConfig; /* x36 */
#define PHY_CONFIG_PHY_ADDR_MASK 0x1f
#define PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20
u16 topcat; /* x38 */
#define TOPCAT_PRESENT 0x0100
#define TOPCAT_MASK 0xFF00
#define EEPROM_UNUSED_1_SIZE 2
u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */
u16 bufletSize; /* x3C */
u16 bufletCount; /* x3E */
u16 bufletPauseThreshold; /* x40 */
u16 tcpWindowThreshold50; /* x42 */
u16 tcpWindowThreshold25; /* x44 */
u16 tcpWindowThreshold0; /* x46 */
u16 ipHashTableBaseHi; /* x48 */
u16 ipHashTableBaseLo; /* x4A */
u16 ipHashTableSize; /* x4C */
u16 tcpHashTableBaseHi; /* x4E */
u16 tcpHashTableBaseLo; /* x50 */
u16 tcpHashTableSize; /* x52 */
u16 ncbTableBaseHi; /* x54 */
u16 ncbTableBaseLo; /* x56 */
u16 ncbTableSize; /* x58 */
u16 drbTableBaseHi; /* x5A */
u16 drbTableBaseLo; /* x5C */
u16 drbTableSize; /* x5E */
#define EEPROM_UNUSED_2_SIZE 4
u8 unused_2[EEPROM_UNUSED_2_SIZE]; /* x60 */
u16 ipReassemblyTimeout; /* x64 */
u16 tcpMaxWindowSizeHi; /* x66 */
u16 tcpMaxWindowSizeLo; /* x68 */
u32 net_ip_addr0; /* x6A Added for TOE
* functionality. */
u32 net_ip_addr1; /* x6E */
u32 scsi_ip_addr0; /* x72 */
u32 scsi_ip_addr1; /* x76 */
#define EEPROM_UNUSED_3_SIZE 128 /* changed from 144 to account
* for ip addresses */
u8 unused_3[EEPROM_UNUSED_3_SIZE]; /* x7A */
u16 subsysVendorId_f0; /* xFA */
u16 subsysDeviceId_f0; /* xFC */
/* Address = 0x7F */
#define FM93C56A_SIGNATURE 0x9356
#define FM93C66A_SIGNATURE 0x9366
u16 signature; /* xFE */
#define EEPROM_UNUSED_4_SIZE 250
u8 unused_4[EEPROM_UNUSED_4_SIZE]; /* x100 */
u16 subsysVendorId_f1; /* x1FA */
u16 subsysDeviceId_f1; /* x1FC */
u16 checksum; /* x1FE */
} __attribute__ ((packed)) isp4010;
struct { /* isp4022 */
u8 asicId[4]; /* x00 */
u8 version; /* x04 */
u8 reserved_5; /* x05 */
u16 boardId; /* x06 */
u8 boardIdStr[16]; /* x08 */
u8 serialNumber[16]; /* x18 */
/* External Hardware Configuration */
u16 ext_hw_conf; /* x28 */
/* MAC 0 CONFIGURATION */
struct eeprom_port_cfg macCfg_port0; /* x2A */
/* MAC 1 CONFIGURATION */
struct eeprom_port_cfg macCfg_port1; /* x4A */
/* DDR SDRAM Configuration */
u16 bufletSize; /* x6A */
u16 bufletCount; /* x6C */
u16 tcpWindowThreshold50; /* x6E */
u16 tcpWindowThreshold25; /* x70 */
u16 tcpWindowThreshold0; /* x72 */
u16 ipHashTableBaseHi; /* x74 */
u16 ipHashTableBaseLo; /* x76 */
u16 ipHashTableSize; /* x78 */
u16 tcpHashTableBaseHi; /* x7A */
u16 tcpHashTableBaseLo; /* x7C */
u16 tcpHashTableSize; /* x7E */
u16 ncbTableBaseHi; /* x80 */
u16 ncbTableBaseLo; /* x82 */
u16 ncbTableSize; /* x84 */
u16 drbTableBaseHi; /* x86 */
u16 drbTableBaseLo; /* x88 */
u16 drbTableSize; /* x8A */
u16 reserved_142[4]; /* x8C */
/* TCP/IP Parameters */
u16 ipReassemblyTimeout; /* x94 */
u16 tcpMaxWindowSize; /* x96 */
u16 ipSecurity; /* x98 */
u8 reserved_156[294]; /* x9A */
u16 qDebug[8]; /* QLOGIC USE ONLY x1C0 */
struct eeprom_function_cfg funcCfg_fn0; /* x1D0 */
u16 reserved_510; /* x1FE */
/* Address = 512 */
u8 oemSpace[432]; /* x200 */
struct bios_params sBIOSParams_fn1; /* x3B0 */
struct eeprom_function_cfg funcCfg_fn1; /* x3D0 */
u16 reserved_1022; /* x3FE */
/* Address = 1024 */
u8 reserved_1024[464]; /* x400 */
struct eeprom_function_cfg funcCfg_fn2; /* x5D0 */
u16 reserved_1534; /* x5FE */
/* Address = 1536 */
u8 reserved_1536[432]; /* x600 */
struct bios_params sBIOSParams_fn3; /* x7B0 */
struct eeprom_function_cfg funcCfg_fn3; /* x7D0 */
u16 checksum; /* x7FE */
} __attribute__ ((packed)) isp4022;
};
};
#endif /* _QL4XNVRM_H_ */
This diff is collapsed.
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#define QLA4XXX_DRIVER_VERSION "5.00.05b9-k"
#define QL4_DRIVER_MAJOR_VER 5
#define QL4_DRIVER_MINOR_VER 0
#define QL4_DRIVER_PATCH_VER 5
#define QL4_DRIVER_BETA_VER 9