Commit c8806b6c authored by Narsimhulu Musini, committed by James Bottomley

snic: driver for Cisco SCSI HBA

Cisco has developed a new PCI HBA interface called sNIC, which stands for
SCSI NIC. This is a new storage feature supported on specialized network
adapters. The new PCI function provides a uniform host interface and
abstracts the backend storage.

[jejb: fix up checkpatch errors]
Signed-off-by: Narsimhulu Musini <nmusini@cisco.com>
Signed-off-by: Sesidhar Baddela <sebaddel@cisco.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: James Bottomley <JBottomley@Odin.com>
Parent 8d2b21db
......@@ -2590,6 +2590,13 @@ L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/fnic/
CISCO SCSI HBA DRIVER
M: Narsimhulu Musini <nmusini@cisco.com>
M: Sesidhar Baddela <sebaddel@cisco.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/snic/
CMPC ACPI DRIVER
M: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
M: Daniel Oliveira Nascimento <don@syst.com.br>
......
......@@ -634,6 +634,23 @@ config FCOE_FNIC
<file:Documentation/scsi/scsi.txt>.
The module will be called fnic.
config SCSI_SNIC
tristate "Cisco SNIC Driver"
depends on PCI && SCSI
help
This is support for the Cisco PCI-Express SCSI HBA.
To compile this driver as a module, choose M here and read
<file:Documentation/scsi/scsi.txt>.
The module will be called snic.
config SCSI_SNIC_DEBUG_FS
bool "Cisco SNIC Driver Debugfs Support"
depends on SCSI_SNIC && DEBUG_FS
help
This enables listing of debugging information from the SNIC driver,
available via the debugfs file system.
config SCSI_DMX3191D
tristate "DMX3191D SCSI support"
depends on PCI && SCSI
......
......@@ -39,6 +39,7 @@ obj-$(CONFIG_LIBFC) += libfc/
obj-$(CONFIG_LIBFCOE) += fcoe/
obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/
obj-$(CONFIG_SCSI_SNIC) += snic/
obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
......
# Build the snic driver (built-in or module, per CONFIG_SCSI_SNIC).
obj-$(CONFIG_SCSI_SNIC) += snic.o

# Core driver objects: sysfs attributes, probe/main, resource setup,
# interrupt handling, control path, I/O path, SCSI path, discovery,
# plus the shared vNIC queue/device helpers.
snic-y := \
snic_attrs.o \
snic_main.o \
snic_res.o \
snic_isr.o \
snic_ctl.o \
snic_io.o \
snic_scsi.o \
snic_disc.o \
vnic_cq.o \
vnic_intr.o \
vnic_dev.o \
vnic_wq.o

# Debugfs/trace support only when CONFIG_SCSI_SNIC_DEBUG_FS is enabled.
snic-$(CONFIG_SCSI_SNIC_DEBUG_FS) += snic_debugfs.o snic_trc.o
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
/*
* Completion queue descriptor types
*/
/*
 * Type codes stored in the low bits of cq_desc.type_color.
 * NOTE(review): values are presumably fixed by the adapter firmware
 * interface -- do not renumber without confirming against the spec.
 */
enum cq_desc_types {
CQ_DESC_TYPE_WQ_ENET = 0,
CQ_DESC_TYPE_DESC_COPY = 1,
CQ_DESC_TYPE_WQ_EXCH = 2,
CQ_DESC_TYPE_RQ_ENET = 3,
CQ_DESC_TYPE_RQ_FCP = 4,
};
/* Completion queue descriptor: 16B
*
* All completion queues have this basic layout. The
* type_specific area is unique for each completion
* queue type.
*/
struct cq_desc {
__le16 completed_index; /* low CQ_DESC_COMP_NDX_BITS bits are valid */
__le16 q_number; /* low CQ_DESC_Q_NUM_BITS bits are valid */
u8 type_specific[11]; /* interpretation depends on descriptor type */
u8 type_color; /* type in low CQ_DESC_TYPE_BITS bits; color in bit 7 */
};
#define CQ_DESC_TYPE_BITS 4
#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK 1
#define CQ_DESC_COLOR_SHIFT 7
#define CQ_DESC_Q_NUM_BITS 10
#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
/*
 * cq_desc_dec - decode the generic fields of a completion descriptor.
 * @desc_arg:        descriptor to decode (not modified)
 * @type:            out: descriptor type (low CQ_DESC_TYPE_BITS bits)
 * @color:           out: color bit; per the barrier comment below it is the
 *                   last byte the hardware writes, so callers presumably use
 *                   it to detect newly posted entries
 * @q_number:        out: queue number, masked to CQ_DESC_Q_NUM_BITS
 * @completed_index: out: completed index, masked to CQ_DESC_COMP_NDX_BITS
 */
static inline void cq_desc_dec(const struct cq_desc *desc_arg,
u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
const struct cq_desc *desc = desc_arg;
const u8 type_color = desc->type_color;
*color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
/*
 * Make sure color bit is read from desc *before* other fields
 * are read from desc. Hardware guarantees color bit is last
 * bit (byte) written. Adding the rmb() prevents the compiler
 * and/or CPU from reordering the reads which would potentially
 * result in reading stale values.
 */
rmb();
*type = type_color & CQ_DESC_TYPE_MASK;
*q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
*completed_index = le16_to_cpu(desc->completed_index) &
CQ_DESC_COMP_NDX_MASK;
}
#endif /* _CQ_DESC_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
#include "cq_desc.h"
/* Ethernet completion queue descriptor: 16B */
/* Field-for-field compatible with struct cq_desc (see cq_enet_wq_desc_dec,
 * which casts between the two); only the reserved area differs in name. */
struct cq_enet_wq_desc {
__le16 completed_index;
__le16 q_number;
u8 reserved[11];
u8 type_color;
};
/*
 * cq_enet_wq_desc_dec - decode an Ethernet WQ completion descriptor.
 * The layout matches the generic struct cq_desc, so decoding is simply
 * delegated to cq_desc_dec() after a pointer cast.
 */
static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
	u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
{
	const struct cq_desc *base = (const struct cq_desc *)desc;

	cq_desc_dec(base, type, color, q_number, completed_index);
}
#endif /* _CQ_ENET_DESC_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _SNIC_H_
#define _SNIC_H_
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/mempool.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include "snic_disc.h"
#include "snic_io.h"
#include "snic_res.h"
#include "snic_trc.h"
#include "snic_stats.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_snic.h"
#define SNIC_DRV_NAME "snic"
#define SNIC_DRV_DESCRIPTION "Cisco SCSI NIC Driver"
#define SNIC_DRV_VERSION "0.0.1.18"
#define PFX SNIC_DRV_NAME ":"
#define DFX SNIC_DRV_NAME "%d: "
#define DESC_CLEAN_LOW_WATERMARK 8
#define SNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
#define SNIC_MAX_IO_REQ 50 /* scsi_cmnd tag map entries */
#define SNIC_MIN_IO_REQ 8 /* Min IO throttle count */
#define SNIC_IO_LOCKS 64 /* IO locks: power of 2 */
#define SNIC_DFLT_QUEUE_DEPTH 32 /* Default Queue Depth */
#define SNIC_MAX_QUEUE_DEPTH 64 /* Max Queue Depth */
#define SNIC_DFLT_CMD_TIMEOUT 90 /* Extended tmo for FW */
/*
* Tag bits used for special requests.
*/
#define SNIC_TAG_ABORT BIT(30) /* Tag indicating abort */
#define SNIC_TAG_DEV_RST BIT(29) /* Tag for device reset */
#define SNIC_TAG_IOCTL_DEV_RST BIT(28) /* Tag for User Device Reset */
#define SNIC_TAG_MASK (BIT(24) - 1) /* Mask for lookup */
#define SNIC_NO_TAG -1
/*
* Command flags to identify the type of command and for other future use
*/
#define SNIC_NO_FLAGS 0
#define SNIC_IO_INITIALIZED BIT(0)
#define SNIC_IO_ISSUED BIT(1)
#define SNIC_IO_DONE BIT(2)
#define SNIC_IO_REQ_NULL BIT(3)
#define SNIC_IO_ABTS_PENDING BIT(4)
#define SNIC_IO_ABORTED BIT(5)
#define SNIC_IO_ABTS_ISSUED BIT(6)
#define SNIC_IO_TERM_ISSUED BIT(7)
#define SNIC_IO_ABTS_TIMEDOUT BIT(8)
#define SNIC_IO_ABTS_TERM_DONE BIT(9)
#define SNIC_IO_ABTS_TERM_REQ_NULL BIT(10)
#define SNIC_IO_ABTS_TERM_TIMEDOUT BIT(11)
#define SNIC_IO_INTERNAL_TERM_PENDING BIT(12)
#define SNIC_IO_INTERNAL_TERM_ISSUED BIT(13)
#define SNIC_DEVICE_RESET BIT(14)
#define SNIC_DEV_RST_ISSUED BIT(15)
#define SNIC_DEV_RST_TIMEDOUT BIT(16)
#define SNIC_DEV_RST_ABTS_ISSUED BIT(17)
#define SNIC_DEV_RST_TERM_ISSUED BIT(18)
#define SNIC_DEV_RST_DONE BIT(19)
#define SNIC_DEV_RST_REQ_NULL BIT(20)
#define SNIC_DEV_RST_ABTS_DONE BIT(21)
#define SNIC_DEV_RST_TERM_DONE BIT(22)
#define SNIC_DEV_RST_ABTS_PENDING BIT(23)
#define SNIC_DEV_RST_PENDING BIT(24)
#define SNIC_DEV_RST_NOTSUP BIT(25)
#define SNIC_SCSI_CLEANUP BIT(26)
#define SNIC_HOST_RESET_ISSUED BIT(27)
#define SNIC_ABTS_TIMEOUT 30000 /* msec */
#define SNIC_LUN_RESET_TIMEOUT 30000 /* msec */
#define SNIC_HOST_RESET_TIMEOUT 30000 /* msec */
/*
* These are protected by the hashed req_lock.
*/
#define CMD_SP(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->rqi)
#define CMD_STATE(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->state)
#define CMD_ABTS_STATUS(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->abts_status)
#define CMD_LR_STATUS(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->lr_status)
#define CMD_FLAGS(Cmnd) \
(((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->flags)
#define SNIC_INVALID_CODE 0x100 /* Hdr Status val unused by firmware */
#define SNIC_MAX_TARGET 256
#define SNIC_FLAGS_NONE (0)
/* snic module params */
extern unsigned int snic_max_qdepth;
/* snic debugging */
extern unsigned int snic_log_level;
#define SNIC_MAIN_LOGGING 0x1
#define SNIC_SCSI_LOGGING 0x2
#define SNIC_ISR_LOGGING 0x8
#define SNIC_DESC_LOGGING 0x10
#define SNIC_CHECK_LOGGING(LEVEL, CMD) \
do { \
if (unlikely(snic_log_level & LEVEL)) \
do { \
CMD; \
} while (0); \
} while (0)
#define SNIC_MAIN_DBG(host, fmt, args...) \
SNIC_CHECK_LOGGING(SNIC_MAIN_LOGGING, \
shost_printk(KERN_INFO, host, fmt, ## args);)
#define SNIC_SCSI_DBG(host, fmt, args...) \
SNIC_CHECK_LOGGING(SNIC_SCSI_LOGGING, \
shost_printk(KERN_INFO, host, fmt, ##args);)
#define SNIC_DISC_DBG(host, fmt, args...) \
SNIC_CHECK_LOGGING(SNIC_SCSI_LOGGING, \
shost_printk(KERN_INFO, host, fmt, ##args);)
#define SNIC_ISR_DBG(host, fmt, args...) \
SNIC_CHECK_LOGGING(SNIC_ISR_LOGGING, \
shost_printk(KERN_INFO, host, fmt, ##args);)
#define SNIC_HOST_ERR(host, fmt, args...) \
shost_printk(KERN_ERR, host, fmt, ##args)
#define SNIC_HOST_INFO(host, fmt, args...) \
shost_printk(KERN_INFO, host, fmt, ##args)
#define SNIC_INFO(fmt, args...) \
pr_info(PFX fmt, ## args)
#define SNIC_DBG(fmt, args...) \
pr_info(PFX fmt, ## args)
#define SNIC_ERR(fmt, args...) \
pr_err(PFX fmt, ## args)
#ifdef DEBUG
/*
 * SNIC_BUG_ON - log and BUG when EXPR is true (DEBUG builds).
 *
 * EXPR is evaluated exactly once: the original expansion evaluated it a
 * second time inside BUG_ON()/WARN_ON_ONCE(), which misfires if EXPR has
 * side effects.
 */
#define SNIC_BUG_ON(EXPR) \
	({ \
		int _snic_bug = !!(EXPR); \
		if (_snic_bug) { \
			SNIC_ERR("SNIC BUG(%s)\n", #EXPR); \
			BUG_ON(_snic_bug); \
		} \
	})
#else
/*
 * SNIC_BUG_ON - log and warn (once) when EXPR is true (non-DEBUG builds).
 * EXPR is evaluated exactly once; see the DEBUG variant above.
 */
#define SNIC_BUG_ON(EXPR) \
	({ \
		int _snic_bug = !!(EXPR); \
		if (_snic_bug) { \
			SNIC_ERR("SNIC BUG(%s) at %s : %d\n", \
				 #EXPR, __func__, __LINE__); \
			WARN_ON_ONCE(_snic_bug); \
		} \
	})
#endif
/* Soft assert */
#define SNIC_ASSERT_NOT_IMPL(EXPR) \
({ \
if (EXPR) {\
SNIC_INFO("Functionality not impl'ed at %s:%d\n", \
__func__, __LINE__); \
WARN_ON_ONCE(EXPR); \
} \
})
extern const char *snic_state_str[];
/* Interrupt source indexes used in legacy INTx mode. */
enum snic_intx_intr_index {
SNIC_INTX_WQ_RQ_COPYWQ,
SNIC_INTX_ERR,
SNIC_INTX_NOTIFY,
SNIC_INTX_INTR_MAX, /* number of INTx sources */
};
/* MSI-X vector indexes (also size snic->msix_entry[] / snic->msix[]). */
enum snic_msix_intr_index {
SNIC_MSIX_WQ,
SNIC_MSIX_IO_CMPL,
SNIC_MSIX_ERR_NOTIFY,
SNIC_MSIX_INTR_MAX, /* number of MSI-X vectors */
};
/* Per-vector MSI-X book-keeping. */
struct snic_msix_entry {
int requested; /* presumably non-zero once the IRQ is requested -- confirm in snic_isr.c */
char devname[IFNAMSIZ]; /* device name for the vector */
irqreturn_t (*isr)(int, void *); /* interrupt handler */
void *devid; /* cookie handed to the handler */
};
/* Adapter state held in snic->state (see snic_set_state/snic_get_state). */
enum snic_state {
SNIC_INIT = 0,
SNIC_ERROR,
SNIC_ONLINE,
SNIC_OFFLINE,
SNIC_FWRESET,
};
#define SNIC_WQ_MAX 1
#define SNIC_CQ_IO_CMPL_MAX 1
#define SNIC_CQ_MAX (SNIC_WQ_MAX + SNIC_CQ_IO_CMPL_MAX)
/* Firmware information, populated from the exchange-version completion
 * (snic_io_exch_ver_cmpl_handler) under snic->snic_lock. */
struct snic_fw_info {
u32 fw_ver;
u32 hid; /* u16 hid | u16 vnic id */
u32 max_concur_ios; /* max concurrent ios */
u32 max_sgs_per_cmd; /* max sgls per IO */
u32 max_io_sz; /* max io size supported */
u32 hba_cap; /* hba capabilities */
u32 max_tgts; /* max tgts supported */
u16 io_tmo; /* FW Extended timeout */
struct completion *wait; /* protected by snic lock*/
};
/*
 * snic_work item : defined to process asynchronous events
 */
struct snic_work {
struct work_struct work;
u16 ev_id; /* event identifier (semantics set by the event producer) */
u64 *ev_data; /* event payload -- ownership/lifetime defined by producer */
};
/*
 * snic structure to represent SCSI vNIC
 *
 * One instance per probed PCI function. Hot data (completion queues,
 * work queues, interrupt resources) is cache-line aligned at the end.
 */
struct snic {
/* snic specific members */
struct list_head list;
char name[IFNAMSIZ];
atomic_t state; /* enum snic_state; see snic_set_state/snic_get_state */
spinlock_t snic_lock;
struct completion *remove_wait;
bool in_remove;
bool stop_link_events; /* stop processing link events */
/* discovery related */
struct snic_disc disc;
/* Scsi Host info */
struct Scsi_Host *shost;
/* vnic related structures */
struct vnic_dev_bar bar0;
struct vnic_stats *stats;
unsigned long stats_time;
unsigned long stats_reset_time;
struct vnic_dev *vdev;
/* hw resource info */
unsigned int wq_count;
unsigned int cq_count;
unsigned int intr_count;
unsigned int err_intr_offset;
int link_status; /* retrieved from svnic_dev_link_status() */
u32 link_down_cnt;
/* pci related */
struct pci_dev *pdev;
struct msix_entry msix_entry[SNIC_MSIX_INTR_MAX];
struct snic_msix_entry msix[SNIC_MSIX_INTR_MAX];
/* io related info */
mempool_t *req_pool[SNIC_REQ_MAX_CACHES]; /* (??) */
____cacheline_aligned spinlock_t io_req_lock[SNIC_IO_LOCKS];
/* Maintain snic specific commands, cmds with no tag in spl_cmd_list */
____cacheline_aligned spinlock_t spl_cmd_lock;
struct list_head spl_cmd_list;
unsigned int max_tag_id;
atomic_t ios_inflight; /* io in flight counter */
struct vnic_snic_config config;
struct work_struct link_work;
/* firmware information */
struct snic_fw_info fwinfo;
/* Work for processing Target related work */
struct work_struct tgt_work;
/* Work for processing Discovery */
struct work_struct disc_work;
/* stats related */
unsigned int reset_stats;
atomic64_t io_cmpl_skip; /* see snic_reset_stats_write: completions to skip after a stats reset */
struct snic_stats s_stats; /* Per SNIC driver stats */
/* platform specific */
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
struct dentry *stats_host; /* Per snic debugfs root */
struct dentry *stats_file; /* Per snic debugfs file */
struct dentry *reset_stats_file;/* Per snic reset stats file */
#endif
/* completion queue cache line section */
____cacheline_aligned struct vnic_cq cq[SNIC_CQ_MAX];
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[SNIC_WQ_MAX];
spinlock_t wq_lock[SNIC_WQ_MAX];
/* interrupt resource cache line section */
____cacheline_aligned struct vnic_intr intr[SNIC_MSIX_INTR_MAX];
}; /* end of snic structure */
/*
 * SNIC Driver's Global Data
 *
 * Single instance shared by all adapters (see snic_glob below).
 */
struct snic_global {
struct list_head snic_list; /* presumably guarded by snic_list_lock -- confirm */
spinlock_t snic_list_lock;
struct kmem_cache *req_cache[SNIC_REQ_MAX_CACHES];
struct workqueue_struct *event_q;
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
/* debugfs related global data */
struct dentry *trc_root; /* "snic" debugfs dir (snic_debugfs_init) */
struct dentry *stats_root; /* "snic/statistics" debugfs dir */
struct snic_trc trc ____cacheline_aligned;
#endif
};
extern struct snic_global *snic_glob;

/* global init/teardown (snic_main.c) */
int snic_glob_init(void);
void snic_glob_cleanup(void);

extern struct workqueue_struct *snic_event_queue;
extern struct device_attribute *snic_attrs[];

/* SCSI midlayer entry points */
int snic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
int snic_abort_cmd(struct scsi_cmnd *);
int snic_device_reset(struct scsi_cmnd *);
int snic_host_reset(struct scsi_cmnd *);
int snic_reset(struct Scsi_Host *, struct scsi_cmnd *);
void snic_shutdown_scsi_cleanup(struct snic *);

/* interrupt setup/teardown */
int snic_request_intr(struct snic *);
void snic_free_intr(struct snic *);
int snic_set_intr_mode(struct snic *);
void snic_clear_intr_mode(struct snic *);

/* completion handling and queue management */
int snic_fwcq_cmpl_handler(struct snic *, int);
int snic_wq_cmpl_handler(struct snic *, int);
void snic_free_wq_buf(struct vnic_wq *, struct vnic_wq_buf *);

/* error/link handling */
void snic_log_q_error(struct snic *);
void snic_handle_link_event(struct snic *);
void snic_handle_link(struct work_struct *);

/* control path (snic_ctl.c) */
int snic_queue_exch_ver_req(struct snic *);
int snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *);

int snic_queue_wq_desc(struct snic *, void *os_buf, u16 len);

/* untagged request bookkeeping */
void snic_handle_untagged_req(struct snic *, struct snic_req_info *);
void snic_release_untagged_req(struct snic *, struct snic_req_info *);
void snic_free_all_untagged_reqs(struct snic *);
int snic_get_conf(struct snic *);

/* state accessors and debug helpers */
void snic_set_state(struct snic *, enum snic_state);
int snic_get_state(struct snic *);
const char *snic_state_to_str(unsigned int);
void snic_hex_dump(char *, char *, int);
void snic_print_desc(const char *fn, char *os_buf, int len);
const char *show_opcode_name(int val);
#endif /* _SNIC_H_ */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/device.h>
#include "snic.h"
/*
 * snic_show_sym_name - sysfs show: symbolic name of this sNIC instance.
 *
 * Uses scnprintf() rather than snprintf(): sysfs show() methods must
 * return the number of bytes actually written to @buf (never more than
 * PAGE_SIZE), which is exactly scnprintf()'s contract.
 */
static ssize_t
snic_show_sym_name(struct device *dev,
		   struct device_attribute *attr,
		   char *buf)
{
	struct snic *snic = shost_priv(class_to_shost(dev));

	return scnprintf(buf, PAGE_SIZE, "%s\n", snic->name);
}
/*
 * snic_show_state - sysfs show: current adapter state as a string.
 *
 * scnprintf() (not snprintf()) so the return value is the byte count
 * actually written, as required for sysfs show() methods.
 */
static ssize_t
snic_show_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct snic *snic = shost_priv(class_to_shost(dev));

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 snic_state_str[snic_get_state(snic)]);
}
/*
 * snic_show_drv_version - sysfs show: driver version string.
 *
 * scnprintf() (not snprintf()) so the return value is the byte count
 * actually written, as required for sysfs show() methods.
 */
static ssize_t
snic_show_drv_version(struct device *dev,
		      struct device_attribute *attr,
		      char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", SNIC_DRV_VERSION);
}
/*
 * snic_show_link_state - sysfs show: "Link Up" / "Link Down".
 *
 * For direct-attached (SNIC_DAS) transports the cached link state is
 * refreshed from the vNIC device before reporting. scnprintf() is used
 * so the return value is the byte count actually written.
 */
static ssize_t
snic_show_link_state(struct device *dev,
		     struct device_attribute *attr,
		     char *buf)
{
	struct snic *snic = shost_priv(class_to_shost(dev));

	if (snic->config.xpt_type == SNIC_DAS)
		snic->link_status = svnic_dev_link_status(snic->vdev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 (snic->link_status) ? "Link Up" : "Link Down");
}
/* Read-only host attributes, exported via Scsi_Host's shost_attrs. */
static DEVICE_ATTR(snic_sym_name, S_IRUGO, snic_show_sym_name, NULL);
static DEVICE_ATTR(snic_state, S_IRUGO, snic_show_state, NULL);
static DEVICE_ATTR(drv_version, S_IRUGO, snic_show_drv_version, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, snic_show_link_state, NULL);

/* NULL-terminated list consumed by the SCSI host template. */
struct device_attribute *snic_attrs[] = {
&dev_attr_snic_sym_name,
&dev_attr_snic_state,
&dev_attr_drv_version,
&dev_attr_link_state,
NULL,
};
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>
#include <linux/ctype.h>
#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"
/*
 * snic_handle_link : Handles link flaps.
 *
 * Work handler queued on snic->link_work. Refreshes the cached link
 * status and link-down count from the vNIC device.
 */
void
snic_handle_link(struct work_struct *work)
{
struct snic *snic = container_of(work, struct snic, link_work);
/* Link events are only expected for direct-attached (DAS) transports. */
if (snic->config.xpt_type != SNIC_DAS) {
SNIC_HOST_INFO(snic->shost, "Link Event Received.\n");
SNIC_ASSERT_NOT_IMPL(1);
return;
}
snic->link_status = svnic_dev_link_status(snic->vdev);
snic->link_down_cnt = svnic_dev_link_down_cnt(snic->vdev);
SNIC_HOST_INFO(snic->shost, "Link Event: Link %s.\n",
((snic->link_status) ? "Up" : "Down"));
}
/*
 * snic_ver_enc : Encodes version str to int
 *
 * The version string is dotted-quad like a netmask: "a.b.c.d" with
 * exactly four components, each in [0, 255].
 *
 * Returns the packed version (a<<24 | b<<16 | c<<8 | d) on success,
 * or -1 for a malformed version string.
 */
static int
snic_ver_enc(const char *s)
{
	int v[4] = {0};
	int i = 0, x = 0;
	char c;
	const char *p = s;
	size_t len = strlen(s);	/* hoisted: was computed twice */

	/* validate version string length: "0.0.0.0" .. "255.255.255.255" */
	if ((len > 15) || (len < 7))
		goto end;

	while ((c = *p++)) {
		if (c == '.') {
			i++;
			continue;
		}

		/*
		 * v[] has four slots (indexes 0..3). The original check
		 * "i > 4" let a fifth dotted component write v[4], one
		 * element past the end of the array.
		 */
		if (i > 3 || !isdigit(c))
			goto end;

		v[i] = v[i] * 10 + (c - '0');
	}

	/* validate sub version numbers */
	for (i = 3; i >= 0; i--)
		if (v[i] > 0xff)
			goto end;

	x |= (v[0] << 24) | v[1] << 16 | v[2] << 8 | v[3];

end:
	if (x == 0) {
		SNIC_ERR("Invalid version string [%s].\n", s);

		return -1;
	}

	return x;
} /* end of snic_ver_enc */
/*
 * snic_queue_exch_ver_req :
 *
 * Queues Exchange Version Request, to communicate host information
 * in return, it gets firmware version details
 *
 * Returns 0 on success, -ENOMEM if no request could be allocated, or
 * the error from snic_queue_wq_desc() if posting the descriptor fails.
 */
int
snic_queue_exch_ver_req(struct snic *snic)
{
	struct snic_req_info *rqi = NULL;
	struct snic_host_req *req = NULL;
	u32 ver = 0;
	int ret = 0;

	SNIC_HOST_INFO(snic->shost, "Exch Ver Req Preparing...\n");

	rqi = snic_req_init(snic, 0);
	if (!rqi) {
		/*
		 * Assign the error code before logging it: the original
		 * code printed "err = 0" here because ret was still its
		 * initial value when SNIC_HOST_ERR ran.
		 */
		ret = -ENOMEM;
		SNIC_HOST_ERR(snic->shost,
			      "Queuing Exch Ver Req failed, err = %d\n",
			      ret);

		goto error;
	}

	req = rqi_to_req(rqi);

	/* Initialize snic_host_req */
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_EXCH_VER, 0, SCSI_NO_TAG,
			snic->config.hid, 0, (ulong)rqi);
	/* NOTE(review): snic_ver_enc() can return -1 on a malformed
	 * SNIC_DRV_VERSION; that value is sent as-is to firmware. */
	ver = snic_ver_enc(SNIC_DRV_VERSION);
	req->u.exch_ver.drvr_ver = cpu_to_le32(ver);
	req->u.exch_ver.os_type = cpu_to_le32(SNIC_OS_LINUX);

	snic_handle_untagged_req(snic, rqi);

	ret = snic_queue_wq_desc(snic, req, sizeof(*req));
	if (ret) {
		snic_release_untagged_req(snic, rqi);
		SNIC_HOST_ERR(snic->shost,
			      "Queuing Exch Ver Req failed, err = %d\n",
			      ret);

		goto error;
	}

	SNIC_HOST_INFO(snic->shost, "Exch Ver Req is issued. ret = %d\n", ret);

error:
	return ret;
} /* end of snic_queue_exch_ver_req */
/*
 * snic_io_exch_ver_cmpl_handler
 *
 * Completion handler for the exchange-version request. Copies the
 * firmware-reported limits into snic->fwinfo (under snic_lock), adjusts
 * the SCSI host's sg_tablesize / can_queue / max_sectors to match, and
 * wakes any waiter set up by snic_get_conf().
 */
int
snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
struct snic_req_info *rqi = NULL;
struct snic_exch_ver_rsp *exv_cmpl = &fwreq->u.exch_ver_cmpl;
u8 typ, hdr_stat;
u32 cmnd_id, hid, max_sgs;
ulong ctx = 0;
unsigned long flags;
int ret = 0;
SNIC_HOST_INFO(snic->shost, "Exch Ver Compl Received.\n");
snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
SNIC_BUG_ON(snic->config.hid != hid);
/* ctx carries the snic_req_info pointer encoded at submit time */
rqi = (struct snic_req_info *) ctx;
if (hdr_stat) {
SNIC_HOST_ERR(snic->shost,
"Exch Ver Completed w/ err status %d\n",
hdr_stat);
goto exch_cmpl_end;
}
spin_lock_irqsave(&snic->snic_lock, flags);
snic->fwinfo.fw_ver = le32_to_cpu(exv_cmpl->version);
snic->fwinfo.hid = le32_to_cpu(exv_cmpl->hid);
snic->fwinfo.max_concur_ios = le32_to_cpu(exv_cmpl->max_concur_ios);
snic->fwinfo.max_sgs_per_cmd = le32_to_cpu(exv_cmpl->max_sgs_per_cmd);
snic->fwinfo.max_io_sz = le32_to_cpu(exv_cmpl->max_io_sz);
snic->fwinfo.max_tgts = le32_to_cpu(exv_cmpl->max_tgts);
snic->fwinfo.io_tmo = le16_to_cpu(exv_cmpl->io_timeout);
SNIC_HOST_INFO(snic->shost,
"vers %u hid %u max_concur_ios %u max_sgs_per_cmd %u max_io_sz %u max_tgts %u fw tmo %u\n",
snic->fwinfo.fw_ver,
snic->fwinfo.hid,
snic->fwinfo.max_concur_ios,
snic->fwinfo.max_sgs_per_cmd,
snic->fwinfo.max_io_sz,
snic->fwinfo.max_tgts,
snic->fwinfo.io_tmo);
/* NOTE(review): hba_cap is only logged here; snic->fwinfo.hba_cap is
 * never assigned even though the field exists -- confirm intent. */
SNIC_HOST_INFO(snic->shost,
"HBA Capabilities = 0x%x\n",
le32_to_cpu(exv_cmpl->hba_cap));
/* Updating SGList size */
max_sgs = snic->fwinfo.max_sgs_per_cmd;
if (max_sgs && max_sgs < SNIC_MAX_SG_DESC_CNT) {
snic->shost->sg_tablesize = max_sgs;
SNIC_HOST_INFO(snic->shost, "Max SGs set to %d\n",
snic->shost->sg_tablesize);
} else if (max_sgs > snic->shost->sg_tablesize) {
SNIC_HOST_INFO(snic->shost,
"Target type %d Supports Larger Max SGList %d than driver's Max SG List %d.\n",
snic->config.xpt_type, max_sgs,
snic->shost->sg_tablesize);
}
if (snic->shost->can_queue > snic->fwinfo.max_concur_ios)
snic->shost->can_queue = snic->fwinfo.max_concur_ios;
/* >> 9: convert the byte limit to 512-byte sectors */
snic->shost->max_sectors = snic->fwinfo.max_io_sz >> 9;
if (snic->fwinfo.wait)
complete(snic->fwinfo.wait);
spin_unlock_irqrestore(&snic->snic_lock, flags);
exch_cmpl_end:
snic_release_untagged_req(snic, rqi);
SNIC_HOST_INFO(snic->shost, "Exch_cmpl Done, hdr_stat %d.\n", hdr_stat);
return ret;
} /* end of snic_io_exch_ver_cmpl_handler */
/*
 * snic_get_conf
 *
 * Synchronous call, and Retrieves snic params.
 *
 * Issues the exchange-version request and waits (up to 2s per attempt,
 * 3 attempts) for snic_io_exch_ver_cmpl_handler() to populate fwinfo.
 * Returns 0 on success, a queueing error, or -ETIMEDOUT.
 */
int
snic_get_conf(struct snic *snic)
{
DECLARE_COMPLETION_ONSTACK(wait);
unsigned long flags;
int ret;
int nr_retries = 3;
SNIC_HOST_INFO(snic->shost, "Retrieving snic params.\n");
spin_lock_irqsave(&snic->snic_lock, flags);
memset(&snic->fwinfo, 0, sizeof(snic->fwinfo));
/* the completion handler will complete() this waiter */
snic->fwinfo.wait = &wait;
spin_unlock_irqrestore(&snic->snic_lock, flags);
/* Additional delay to handle HW Resource initialization. */
msleep(50);
/*
 * Exch ver req can be ignored by FW, if HW Resource initialization
 * is in progress, Hence retry.
 */
do {
ret = snic_queue_exch_ver_req(snic);
if (ret)
return ret;
wait_for_completion_timeout(&wait, msecs_to_jiffies(2000));
spin_lock_irqsave(&snic->snic_lock, flags);
/* fw_ver != 0 means the completion handler filled fwinfo in */
ret = (snic->fwinfo.fw_ver != 0) ? 0 : -ETIMEDOUT;
if (ret)
SNIC_HOST_ERR(snic->shost,
"Failed to retrieve snic params,\n");
/* Unset fwinfo.wait, on success or on last retry */
if (ret == 0 || nr_retries == 1)
snic->fwinfo.wait = NULL;
spin_unlock_irqrestore(&snic->snic_lock, flags);
} while (ret && --nr_retries);
return ret;
} /* end of snic_get_conf */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/debugfs.h>
#include "snic.h"
/*
 * snic_debugfs_init - Initialize debugfs for snic debug logging
 *
 * Description:
 * When debugfs is configured this routine sets up the snic debugfs
 * filesystem. If not already created, this routine will create the
 * snic directory and the statistics directory for trace buffer and
 * stats logging.
 *
 * Returns 0 on success, -1 if either directory cannot be created.
 */
int
snic_debugfs_init(void)
{
	int rc = -1;
	struct dentry *de = NULL;

	de = debugfs_create_dir("snic", NULL);
	if (!de) {
		SNIC_DBG("Cannot create debugfs root\n");

		return rc;
	}
	snic_glob->trc_root = de;

	de = debugfs_create_dir("statistics", snic_glob->trc_root);
	if (!de) {
		SNIC_DBG("Cannot create Statistics directory\n");
		/*
		 * Do not leak the "snic" root created above: the original
		 * code returned with trc_root still set, leaving a dangling
		 * debugfs directory on this error path.
		 */
		debugfs_remove(snic_glob->trc_root);
		snic_glob->trc_root = NULL;

		return rc;
	}
	snic_glob->stats_root = de;

	rc = 0;

	return rc;
} /* end of snic_debugfs_init */
/*
 * snic_debugfs_term - Tear down debugfs infrastructure
 *
 * Description:
 * When debugfs is configured this routine removes debugfs file system
 * elements that are specific to snic. Pointers are cleared so a later
 * re-init starts from a clean state.
 */
void
snic_debugfs_term(void)
{
debugfs_remove(snic_glob->stats_root);
snic_glob->stats_root = NULL;
debugfs_remove(snic_glob->trc_root);
snic_glob->trc_root = NULL;
}
/*
 * snic_reset_stats_open - Open the reset_stats file
 *
 * Stashes the snic pointer that was placed in i_private at file-creation
 * time into filp->private_data for the read/write handlers.
 */
static int
snic_reset_stats_open(struct inode *inode, struct file *filp)
{
	void *priv = inode->i_private;

	SNIC_BUG_ON(!priv);
	filp->private_data = priv;

	return 0;
}
/*
 * snic_reset_stats_read - Read a reset_stats debugfs file
 * @filp: The file pointer to read from.
 * @ubuf: The buffer to copy the data to.
 * @cnt: The number of bytes to read.
 * @ppos: The position in the file to start reading from.
 *
 * Description:
 * Formats the current value of snic->reset_stats into a small local
 * buffer and copies up to @cnt bytes of it to user space, starting at
 * @ppos.
 *
 * Returns:
 * The number of bytes copied to user space.
 */
static ssize_t
snic_reset_stats_read(struct file *filp,
		      char __user *ubuf,
		      size_t cnt,
		      loff_t *ppos)
{
	struct snic *snic = filp->private_data;
	char tmp[64];
	int nbytes;

	nbytes = sprintf(tmp, "%u\n", snic->reset_stats);

	return simple_read_from_buffer(ubuf, cnt, ppos, tmp, nbytes);
}
/*
 * snic_reset_stats_write - Write to reset_stats debugfs file
 * @filp: The file pointer to write from
 * @ubuf: The buffer to copy the data from.
 * @cnt: The number of bytes to write.
 * @ppos: The position in the file to start writing to.
 *
 * Description:
 * This routine writes data from user buffer @ubuf to buffer @buf and
 * resets cumulative stats of snic. Any non-zero value clears the abort,
 * reset, and misc stat groups and most of the io/fw groups.
 *
 * Returns:
 * This function returns the amount of data that was written.
 */
static ssize_t
snic_reset_stats_write(struct file *filp,
const char __user *ubuf,
size_t cnt,
loff_t *ppos)
{
struct snic *snic = (struct snic *) filp->private_data;
struct snic_stats *stats = &snic->s_stats;
u64 *io_stats_p = (u64 *) &stats->io;
u64 *fw_stats_p = (u64 *) &stats->fw;
char buf[64];
unsigned long val;
int ret;
if (cnt >= sizeof(buf))
return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = '\0';
ret = kstrtoul(buf, 10, &val);
if (ret < 0)
return ret;
snic->reset_stats = val;
if (snic->reset_stats) {
/* Skip variable is used to avoid discrepancies between Num IOs
 * and IO Completions stats. Skip incrementing No IO Compls
 * for pending active IOs after reset_stats
 */
atomic64_set(&snic->io_cmpl_skip,
atomic64_read(&stats->io.active));
memset(&stats->abts, 0, sizeof(struct snic_abort_stats));
memset(&stats->reset, 0, sizeof(struct snic_reset_stats));
memset(&stats->misc, 0, sizeof(struct snic_misc_stats));
/* +1 skips the first u64 of each group (presumably the active/
 * in-flight counter, which must survive the reset) -- confirm
 * field order against snic_stats.h */
memset(io_stats_p+1,
0,
sizeof(struct snic_io_stats) - sizeof(u64));
memset(fw_stats_p+1,
0,
sizeof(struct snic_fw_stats) - sizeof(u64));
}
(*ppos)++;
SNIC_HOST_INFO(snic->shost, "Reset Op: Driver statistics.\n");
return cnt;
}
/* Release hook for the reset_stats file: drop the per-open reference. */
static int
snic_reset_stats_release(struct inode *inode, struct file *filp)
{
	filp->private_data = NULL;

	return 0;
}
/*
* snic_stats_show - Formats and prints per host specific driver stats.
*/
static int
snic_stats_show(struct seq_file *sfp, void *data)
{
	/* sfp->private holds the per-host snic, set up by single_open() */
	struct snic *snic = (struct snic *) sfp->private;
	struct snic_stats *stats = &snic->s_stats;
	struct timespec last_isr_tms, last_ack_tms;
	u64 maxio_tm;
	int i;
	/* Dump IO Stats */
	seq_printf(sfp,
		   "------------------------------------------\n"
		   "\t\t IO Statistics\n"
		   "------------------------------------------\n");
	/* max_time is in jiffies; printed raw and converted to msec below */
	maxio_tm = (u64) atomic64_read(&stats->io.max_time);
	seq_printf(sfp,
		   "Active IOs : %lld\n"
		   "Max Active IOs : %lld\n"
		   "Total IOs : %lld\n"
		   "IOs Completed : %lld\n"
		   "IOs Failed : %lld\n"
		   "IOs Not Found : %lld\n"
		   "Memory Alloc Failures : %lld\n"
		   "REQs Null : %lld\n"
		   "SCSI Cmd Pointers Null : %lld\n"
		   "Max SGL for any IO : %lld\n"
		   "Max IO Size : %lld Sectors\n"
		   "Max Queuing Time : %lld\n"
		   "Max Completion Time : %lld\n"
		   "Max IO Process Time(FW) : %lld (%u msec)\n",
		   (u64) atomic64_read(&stats->io.active),
		   (u64) atomic64_read(&stats->io.max_active),
		   (u64) atomic64_read(&stats->io.num_ios),
		   (u64) atomic64_read(&stats->io.compl),
		   (u64) atomic64_read(&stats->io.fail),
		   (u64) atomic64_read(&stats->io.io_not_found),
		   (u64) atomic64_read(&stats->io.alloc_fail),
		   (u64) atomic64_read(&stats->io.req_null),
		   (u64) atomic64_read(&stats->io.sc_null),
		   (u64) atomic64_read(&stats->io.max_sgl),
		   (u64) atomic64_read(&stats->io.max_io_sz),
		   (u64) atomic64_read(&stats->io.max_qtime),
		   (u64) atomic64_read(&stats->io.max_cmpl_time),
		   maxio_tm,
		   jiffies_to_msecs(maxio_tm));
	/* Histogram of SG element counts, eight entries per output line */
	seq_puts(sfp, "\nSGL Counters\n");
	for (i = 0; i < SNIC_MAX_SG_DESC_CNT; i++) {
		seq_printf(sfp,
			   "%10lld ",
			   (u64) atomic64_read(&stats->io.sgl_cnt[i]));
		if ((i + 1) % 8 == 0)
			seq_puts(sfp, "\n");
	}
	/* Dump Abort Stats */
	seq_printf(sfp,
		   "\n-------------------------------------------\n"
		   "\t\t Abort Statistics\n"
		   "---------------------------------------------\n");
	seq_printf(sfp,
		   "Aborts : %lld\n"
		   "Aborts Fail : %lld\n"
		   "Aborts Driver Timeout : %lld\n"
		   "Abort FW Timeout : %lld\n"
		   "Abort IO NOT Found : %lld\n",
		   (u64) atomic64_read(&stats->abts.num),
		   (u64) atomic64_read(&stats->abts.fail),
		   (u64) atomic64_read(&stats->abts.drv_tmo),
		   (u64) atomic64_read(&stats->abts.fw_tmo),
		   (u64) atomic64_read(&stats->abts.io_not_found));
	/* Dump Reset Stats */
	seq_printf(sfp,
		   "\n-------------------------------------------\n"
		   "\t\t Reset Statistics\n"
		   "---------------------------------------------\n");
	seq_printf(sfp,
		   "HBA Resets : %lld\n"
		   "HBA Reset Cmpls : %lld\n"
		   "HBA Reset Fail : %lld\n",
		   (u64) atomic64_read(&stats->reset.hba_resets),
		   (u64) atomic64_read(&stats->reset.hba_reset_cmpl),
		   (u64) atomic64_read(&stats->reset.hba_reset_fail));
	/* Dump Firmware Stats */
	seq_printf(sfp,
		   "\n-------------------------------------------\n"
		   "\t\t Firmware Statistics\n"
		   "---------------------------------------------\n");
	seq_printf(sfp,
		   "Active FW Requests : %lld\n"
		   "Max FW Requests : %lld\n"
		   "FW Out Of Resource Errs : %lld\n"
		   "FW IO Errors : %lld\n"
		   "FW SCSI Errors : %lld\n",
		   (u64) atomic64_read(&stats->fw.actv_reqs),
		   (u64) atomic64_read(&stats->fw.max_actv_reqs),
		   (u64) atomic64_read(&stats->fw.out_of_res),
		   (u64) atomic64_read(&stats->fw.io_errs),
		   (u64) atomic64_read(&stats->fw.scsi_errs));
	/* Dump Miscellenous Stats */
	seq_printf(sfp,
		   "\n---------------------------------------------\n"
		   "\t\t Other Statistics\n"
		   "\n---------------------------------------------\n");
	/* last_isr/ack_time are jiffies snapshots; also shown as sec.nsec */
	jiffies_to_timespec(stats->misc.last_isr_time, &last_isr_tms);
	jiffies_to_timespec(stats->misc.last_ack_time, &last_ack_tms);
	seq_printf(sfp,
		   "Last ISR Time : %llu (%8lu.%8lu)\n"
		   "Last Ack Time : %llu (%8lu.%8lu)\n"
		   "ISRs : %llu\n"
		   "Max CQ Entries : %lld\n"
		   "Data Count Mismatch : %lld\n"
		   "IOs w/ Timeout Status : %lld\n"
		   "IOs w/ Aborted Status : %lld\n"
		   "IOs w/ SGL Invalid Stat : %lld\n"
		   "WQ Desc Alloc Fail : %lld\n"
		   "Queue Full : %lld\n"
		   "Target Not Ready : %lld\n",
		   (u64) stats->misc.last_isr_time,
		   last_isr_tms.tv_sec, last_isr_tms.tv_nsec,
		   (u64)stats->misc.last_ack_time,
		   last_ack_tms.tv_sec, last_ack_tms.tv_nsec,
		   (u64) atomic64_read(&stats->misc.isr_cnt),
		   (u64) atomic64_read(&stats->misc.max_cq_ents),
		   (u64) atomic64_read(&stats->misc.data_cnt_mismat),
		   (u64) atomic64_read(&stats->misc.io_tmo),
		   (u64) atomic64_read(&stats->misc.io_aborted),
		   (u64) atomic64_read(&stats->misc.sgl_inval),
		   (u64) atomic64_read(&stats->misc.wq_alloc_fail),
		   (u64) atomic64_read(&stats->misc.qfull),
		   (u64) atomic64_read(&stats->misc.tgt_not_rdy));
	return 0;
}
/*
* snic_stats_open - Open the stats file for specific host
*
* Description:
* This routine opens a debugfs file stats of specific host
*/
static int
snic_stats_open(struct inode *inode, struct file *filp)
{
	/* Bind the per-host snic (stashed in i_private) to the seq file */
	return single_open(filp, snic_stats_show, inode->i_private);
}
/* Read-only "stats" file: standard seq_file plumbing */
static const struct file_operations snic_stats_fops = {
	.owner = THIS_MODULE,
	.open = snic_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* Read/write "reset_stats" file; a non-zero write clears the counters */
static const struct file_operations snic_reset_stats_fops = {
	.owner = THIS_MODULE,
	.open = snic_reset_stats_open,
	.read = snic_reset_stats_read,
	.write = snic_reset_stats_write,
	.release = snic_reset_stats_release,
};
/*
* snic_stats_init - Initialize stats struct and create stats file
* per snic
*
* Description:
 * When debugfs is configured this routine sets up the stats file per snic
* It will create file stats and reset_stats under statistics/host# directory
* to log per snic stats
*/
int
snic_stats_debugfs_init(struct snic *snic)
{
	char name[16];
	struct dentry *de;

	snprintf(name, sizeof(name), "host%d", snic->shost->host_no);

	if (!snic_glob->stats_root) {
		SNIC_DBG("snic_stats root doesn't exist\n");

		return -1;
	}

	/* statistics/host<n>/ directory for this adapter */
	de = debugfs_create_dir(name, snic_glob->stats_root);
	if (!de) {
		SNIC_DBG("Cannot create host directory\n");

		return -1;
	}
	snic->stats_host = de;

	/* read-only counters dump */
	de = debugfs_create_file("stats",
				 S_IFREG|S_IRUGO,
				 snic->stats_host,
				 snic,
				 &snic_stats_fops);
	if (!de) {
		SNIC_DBG("Cannot create host's stats file\n");

		return -1;
	}
	snic->stats_file = de;

	/* writable knob to clear the counters */
	de = debugfs_create_file("reset_stats",
				 S_IFREG|S_IRUGO|S_IWUSR,
				 snic->stats_host,
				 snic,
				 &snic_reset_stats_fops);
	if (!de) {
		SNIC_DBG("Cannot create host's reset_stats file\n");

		return -1;
	}
	snic->reset_stats_file = de;

	return 0;
} /* end of snic_stats_debugfs_init */
/*
* snic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
*
* Description:
 * When debugfs is configured this routine removes the debugfs file system
 * elements that are specific to snic stats
*/
void
snic_stats_debugfs_remove(struct snic *snic)
{
	/* Remove the files before their parent directory; NULL-ing the
	 * dentries keeps a repeated call harmless (debugfs_remove(NULL)
	 * is a no-op).
	 */
	debugfs_remove(snic->stats_file);
	snic->stats_file = NULL;

	debugfs_remove(snic->reset_stats_file);
	snic->reset_stats_file = NULL;

	debugfs_remove(snic->stats_host);
	snic->stats_host = NULL;
}
/* Trace Facility related API */
static void *
snic_trc_seq_start(struct seq_file *sfp, loff_t *pos)
{
	/* Single "record" iterator: the global trace buffer descriptor */
	return &snic_glob->trc;
}
static void *
snic_trc_seq_next(struct seq_file *sfp, void *data, loff_t *pos)
{
	/* One show() call per open; no further records */
	return NULL;
}
static void
snic_trc_seq_stop(struct seq_file *sfp, void *data)
{
	/* Nothing to tear down */
}
#define SNIC_TRC_PBLEN 256
static int
snic_trc_seq_show(struct seq_file *sfp, void *data)
{
	char buf[SNIC_TRC_PBLEN];
	/* Emit one formatted trace record, if any is available */
	if (snic_get_trc_data(buf, SNIC_TRC_PBLEN) > 0)
		seq_printf(sfp, "%s\n", buf);
	return 0;
}
static const struct seq_operations snic_trc_seq_ops = {
	.start = snic_trc_seq_start,
	.next = snic_trc_seq_next,
	.stop = snic_trc_seq_stop,
	.show = snic_trc_seq_show,
};
static int
snic_trc_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &snic_trc_seq_ops);
}
/* Global "trace" debugfs file operations */
static const struct file_operations snic_trc_fops = {
	.owner = THIS_MODULE,
	.open = snic_trc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
/*
* snic_trc_debugfs_init : creates trace/tracing_enable files for trace
* under debugfs
*/
int
snic_trc_debugfs_init(void)
{
	struct dentry *de = NULL;
	int ret = -1;

	if (!snic_glob->trc_root) {
		SNIC_ERR("Debugfs root directory for snic doesn't exist.\n");

		return ret;
	}

	/* boolean knob to turn tracing on/off at runtime */
	de = debugfs_create_bool("tracing_enable",
				 S_IFREG | S_IRUGO | S_IWUSR,
				 snic_glob->trc_root,
				 &snic_glob->trc.enable);
	if (!de) {
		SNIC_ERR("Can't create trace_enable file.\n");

		return ret;
	}
	snic_glob->trc.trc_enable = de;

	/* seq_file dump of the trace buffer */
	de = debugfs_create_file("trace",
				 S_IFREG | S_IRUGO | S_IWUSR,
				 snic_glob->trc_root,
				 NULL,
				 &snic_trc_fops);
	if (!de) {
		/* Fix: error message had a typo ("Cann't") */
		SNIC_ERR("Can't create trace file.\n");

		return ret;
	}
	snic_glob->trc.trc_file = de;
	ret = 0;

	return ret;
} /* end of snic_trc_debugfs_init */
/*
* snic_trc_debugfs_term : cleans up the files created for trace under debugfs
*/
void
snic_trc_debugfs_term(void)
{
	/* Drop both trace files; pointers are cleared so the teardown is
	 * safe to repeat (debugfs_remove(NULL) is a no-op).
	 */
	debugfs_remove(snic_glob->trc.trc_file);
	snic_glob->trc.trc_file = NULL;

	debugfs_remove(snic_glob->trc.trc_enable);
	snic_glob->trc.trc_enable = NULL;
}
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>
#include "snic_disc.h"
#include "snic.h"
#include "snic_io.h"
/* snic target types */
/* Printable names, indexed by enum snic_tgt_type */
static const char * const snic_tgt_type_str[] = {
	[SNIC_TGT_DAS] = "DAS",
	[SNIC_TGT_SAN] = "SAN",
};
static inline const char *
snic_tgt_type_to_str(int typ)
{
	/* Guard against values outside the name table */
	if (typ > SNIC_TGT_NONE && typ <= SNIC_TGT_SAN)
		return snic_tgt_type_str[typ];

	return "Unknown";
}
/* Printable names, indexed by enum snic_tgt_state */
static const char * const snic_tgt_state_str[] = {
	[SNIC_TGT_STAT_INIT] = "INIT",
	[SNIC_TGT_STAT_ONLINE] = "ONLINE",
	[SNIC_TGT_STAT_OFFLINE] = "OFFLINE",
	[SNIC_TGT_STAT_DEL] = "DELETION IN PROGRESS",
};
/* Map a snic_tgt_state value to its printable name. */
const char *
snic_tgt_state_to_str(int state)
{
	if (state < SNIC_TGT_STAT_INIT || state > SNIC_TGT_STAT_DEL)
		return "UNKNOWN";

	return snic_tgt_state_str[state];
}
/*
* Initiate report_tgt req desc
*/
static void
snic_report_tgt_init(struct snic_host_req *req, u32 hid, u8 *buf, u32 len,
		     dma_addr_t rsp_buf_pa, ulong ctx)
{
	struct snic_sg_desc *sgd;

	/* Untagged request: SCSI_NO_TAG, one SG element, rqi in init_ctx */
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_REPORT_TGTS, 0, SCSI_NO_TAG, hid,
			1, ctx);

	/* Single SG element pointing at the DMA-mapped response buffer */
	req->u.rpt_tgts.sg_cnt = cpu_to_le16(1);
	sgd = req_to_sgl(req);
	sgd[0].addr = cpu_to_le64(rsp_buf_pa);
	sgd[0].len = cpu_to_le32(len);
	sgd[0]._resvd = 0;
	req->u.rpt_tgts.sg_addr = cpu_to_le64((ulong)sgd);
}
/*
* snic_queue_report_tgt_req: Queues report target request.
*/
static int
snic_queue_report_tgt_req(struct snic *snic)
{
	struct snic_req_info *rqi = NULL;
	u32 ntgts, buf_len = 0;
	u8 *buf = NULL;
	dma_addr_t pa = 0;
	int ret = 0;
	/* Allocate an untagged request descriptor */
	rqi = snic_req_init(snic, 1);
	if (!rqi) {
		ret = -ENOMEM;
		goto error;
	}
	/* Size the response for the smaller of the fw limit and host max_id */
	if (snic->fwinfo.max_tgts)
		ntgts = min_t(u32, snic->fwinfo.max_tgts, snic->shost->max_id);
	else
		ntgts = snic->shost->max_id;
	/* Allocate Response Buffer */
	SNIC_BUG_ON(ntgts == 0);
	buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN;
	/* NOTE(review): GFP_DMA looks unnecessary since the buffer is
	 * DMA-mapped with pci_map_single() below -- confirm before changing.
	 */
	buf = kzalloc(buf_len, GFP_KERNEL|GFP_DMA);
	if (!buf) {
		snic_req_free(snic, rqi);
		SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n");
		ret = -ENOMEM;
		goto error;
	}
	SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);
	pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(snic->pdev, pa)) {
		kfree(buf);
		snic_req_free(snic, rqi);
		SNIC_HOST_ERR(snic->shost,
			      "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
			      buf);
		ret = -EINVAL;
		goto error;
	}
	SNIC_BUG_ON(pa == 0);
	/* Stash the virtual address so the completion handler can free it */
	rqi->sge_va = (ulong) buf;
	snic_report_tgt_init(rqi->req,
			     snic->config.hid,
			     buf,
			     buf_len,
			     pa,
			     (ulong)rqi);
	snic_handle_untagged_req(snic, rqi);
	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
	if (ret) {
		/* Queuing failed: unmap, free the buffer and release the req */
		pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE);
		kfree(buf);
		rqi->sge_va = 0;
		snic_release_untagged_req(snic, rqi);
		SNIC_HOST_ERR(snic->shost, "Queuing Report Tgts Failed.\n");
		goto error;
	}
	SNIC_DISC_DBG(snic->shost, "Report Targets Issued.\n");
	return ret;
error:
	/* NOTE(review): failures above already logged once; this logs a
	 * second, generic message -- presumably intentional, verify.
	 */
	SNIC_HOST_ERR(snic->shost,
		      "Queuing Report Targets Failed, err = %d\n",
		      ret);
	return ret;
} /* end of snic_queue_report_tgt_req */
/* call into SML */
static void
snic_scsi_scan_tgt(struct work_struct *work)
{
	struct snic_tgt *tgt = container_of(work, struct snic_tgt, scan_work);
	struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
	unsigned long flags;

	SNIC_HOST_INFO(shost, "Scanning Target id 0x%x\n", tgt->id);

	/* Scan every LUN of this target, rescanning existing devices */
	scsi_scan_target(&tgt->dev, tgt->channel, tgt->scsi_tgt_id,
			 SCAN_WILD_CARD, 1);

	/* Scan finished; clear the pending flag under the host lock */
	spin_lock_irqsave(shost->host_lock, flags);
	tgt->flags &= ~SNIC_TGT_SCAN_PENDING;
	spin_unlock_irqrestore(shost->host_lock, flags);
} /* end of snic_scsi_scan_tgt */
/*
* snic_tgt_lookup :
*/
static struct snic_tgt *
snic_tgt_lookup(struct snic *snic, struct snic_tgt_id *tgtid)
{
struct list_head *cur, *nxt;
struct snic_tgt *tgt = NULL;
list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
tgt = list_entry(cur, struct snic_tgt, list);
if (tgt->id == le32_to_cpu(tgtid->tgt_id))
return tgt;
tgt = NULL;
}
return tgt;
} /* end of snic_tgt_lookup */
/*
* snic_tgt_dev_release : Called on dropping last ref for snic_tgt object
*/
void
snic_tgt_dev_release(struct device *dev)
{
	struct snic_tgt *tgt = dev_to_tgt(dev);
	SNIC_HOST_INFO(snic_tgt_to_shost(tgt),
		       "Target Device ID %d (%s) Permanently Deleted.\n",
		       tgt->id,
		       dev_name(dev));
	/* The target must already be unlinked from the discovery list */
	SNIC_BUG_ON(!list_empty(&tgt->list));
	kfree(tgt);
}
/*
* snic_tgt_del : work function to delete snic_tgt
*/
static void
snic_tgt_del(struct work_struct *work)
{
	struct snic_tgt *tgt = container_of(work, struct snic_tgt, del_work);
	struct Scsi_Host *shost = snic_tgt_to_shost(tgt);
	/* Wait for an in-flight scan of this target to finish first */
	if (tgt->flags & SNIC_TGT_SCAN_PENDING)
		scsi_flush_work(shost);
	/* Block IOs on child devices, stops new IOs */
	scsi_target_block(&tgt->dev);
	/* Cleanup IOs */
	snic_tgt_scsi_abort_io(tgt);
	/* Unblock IOs now, to flush if there are any. */
	scsi_target_unblock(&tgt->dev, SDEV_TRANSPORT_OFFLINE);
	/* Delete SCSI Target and sdevs */
	scsi_remove_target(&tgt->dev); /* ?? */
	device_del(&tgt->dev);
	/* Final reference drop; snic_tgt_dev_release() frees tgt */
	put_device(&tgt->dev);
} /* end of snic_tgt_del */
/* snic_tgt_create: checks for existence of a snic_tgt; if it doesn't
 * exist, it creates one.
 */
static struct snic_tgt *
snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
{
	struct snic_tgt *tgt = NULL;
	unsigned long flags;
	int ret;

	tgt = snic_tgt_lookup(snic, tgtid);
	if (tgt) {
		/* update the information if required */
		return tgt;
	}

	tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
	if (!tgt) {
		SNIC_HOST_ERR(snic->shost, "Failure to allocate snic_tgt.\n");
		ret = -ENOMEM;

		return tgt;
	}

	INIT_LIST_HEAD(&tgt->list);
	tgt->id = le32_to_cpu(tgtid->tgt_id);
	tgt->channel = 0;
	SNIC_BUG_ON(le16_to_cpu(tgtid->tgt_type) > SNIC_TGT_SAN);
	tgt->tdata.typ = le16_to_cpu(tgtid->tgt_type);

	/*
	 * Plugging into SML Device Tree
	 */
	tgt->tdata.disc_id = 0;
	tgt->state = SNIC_TGT_STAT_INIT;
	device_initialize(&tgt->dev);
	tgt->dev.parent = get_device(&snic->shost->shost_gendev);
	tgt->dev.release = snic_tgt_dev_release;
	INIT_WORK(&tgt->scan_work, snic_scsi_scan_tgt);
	INIT_WORK(&tgt->del_work, snic_tgt_del);
	switch (tgt->tdata.typ) {
	case SNIC_TGT_DAS:
		dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;

	case SNIC_TGT_SAN:
		dev_set_name(&tgt->dev, "snic_san_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;

	default:
		SNIC_HOST_INFO(snic->shost, "Target type Unknown Detected.\n");
		dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
			     snic->shost->host_no, tgt->channel, tgt->id);
		break;
	}

	spin_lock_irqsave(snic->shost->host_lock, flags);
	list_add_tail(&tgt->list, &snic->disc.tgt_list);
	tgt->scsi_tgt_id = snic->disc.nxt_tgt_id++;
	tgt->state = SNIC_TGT_STAT_ONLINE;
	spin_unlock_irqrestore(snic->shost->host_lock, flags);

	SNIC_HOST_INFO(snic->shost,
		       "Tgt %d, type = %s detected. Adding..\n",
		       tgt->id, snic_tgt_type_to_str(tgt->tdata.typ));

	ret = device_add(&tgt->dev);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "Snic Tgt: device_add, with err = %d\n",
			      ret);

		/*
		 * Fix: the target was already linked on disc.tgt_list above,
		 * so unlink it before freeing, and drop it via put_device()
		 * (snic_tgt_dev_release frees it) instead of kfree()ing a
		 * device_initialize()d object directly.
		 */
		spin_lock_irqsave(snic->shost->host_lock, flags);
		list_del_init(&tgt->list);
		spin_unlock_irqrestore(snic->shost->host_lock, flags);

		put_device(&snic->shost->shost_gendev);
		put_device(&tgt->dev);
		tgt = NULL;

		return tgt;
	}

	SNIC_HOST_INFO(snic->shost, "Scanning %s.\n", dev_name(&tgt->dev));

	scsi_queue_work(snic->shost, &tgt->scan_work);

	return tgt;
} /* end of snic_tgt_create */
/* Handler for discovery */
void
snic_handle_tgt_disc(struct work_struct *work)
{
	struct snic *snic = container_of(work, struct snic, tgt_work);
	struct snic_tgt_id *tgtid = NULL;
	struct snic_tgt *tgt = NULL;
	unsigned long flags;
	int i;
	/* Host is being removed: just drop the firmware response buffer */
	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		kfree(snic->disc.rtgt_info);
		return;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);
	mutex_lock(&snic->disc.mutex);
	/* Discover triggered during disc in progress */
	if (snic->disc.req_cnt) {
		snic->disc.state = SNIC_DISC_DONE;
		snic->disc.req_cnt = 0;
		mutex_unlock(&snic->disc.mutex);
		kfree(snic->disc.rtgt_info);
		snic->disc.rtgt_info = NULL;
		SNIC_HOST_INFO(snic->shost, "tgt_disc: Discovery restart.\n");
		/* Start Discovery Again */
		snic_disc_start(snic);
		return;
	}
	/* Create a snic_tgt for each target id the firmware reported */
	tgtid = (struct snic_tgt_id *)snic->disc.rtgt_info;
	SNIC_BUG_ON(snic->disc.rtgt_cnt == 0 || tgtid == NULL);
	for (i = 0; i < snic->disc.rtgt_cnt; i++) {
		tgt = snic_tgt_create(snic, &tgtid[i]);
		if (!tgt) {
			int buf_sz = snic->disc.rtgt_cnt * sizeof(*tgtid);
			SNIC_HOST_ERR(snic->shost, "Failed to create tgt.\n");
			snic_hex_dump("rpt_tgt_rsp", (char *)tgtid, buf_sz);
			break;
		}
	}
	snic->disc.rtgt_info = NULL;
	snic->disc.state = SNIC_DISC_DONE;
	mutex_unlock(&snic->disc.mutex);
	SNIC_HOST_INFO(snic->shost, "Discovery Completed.\n");
	/* tgtid aliases rtgt_info; the response buffer is freed here */
	kfree(tgtid);
} /* end of snic_handle_tgt_disc */
/*
 * snic_report_tgt_cmpl_handler - completion handler for REPORT_TARGETS.
 * Hands the reported target-id list to the tgt_work worker, or frees it
 * when the firmware reported no targets.
 * Returns 0 on success, 1 when no targets were found.
 */
int
snic_report_tgt_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, cmpl_stat;
	u32 cmnd_id, hid, tgt_cnt = 0;
	ulong ctx;
	struct snic_req_info *rqi = NULL;
	struct snic_tgt_id *tgtid;
	int i, ret = 0;
	/* ctx carries the rqi pointer stashed at request-build time */
	snic_io_hdr_dec(&fwreq->hdr, &typ, &cmpl_stat, &cmnd_id, &hid, &ctx);
	rqi = (struct snic_req_info *) ctx;
	tgtid = (struct snic_tgt_id *) rqi->sge_va;
	tgt_cnt = le32_to_cpu(fwreq->u.rpt_tgts_cmpl.tgt_cnt);
	if (tgt_cnt == 0) {
		SNIC_HOST_ERR(snic->shost, "No Targets Found on this host.\n");
		ret = 1;
		goto end;
	}
	/* printing list of targets here */
	SNIC_HOST_INFO(snic->shost, "Target Count = %d\n", tgt_cnt);
	SNIC_BUG_ON(tgt_cnt > snic->fwinfo.max_tgts);
	for (i = 0; i < tgt_cnt; i++)
		SNIC_HOST_INFO(snic->shost,
			       "Tgt id = 0x%x\n",
			       le32_to_cpu(tgtid[i].tgt_id));
	/*
	 * Queue work for further processing,
	 * Response Buffer Memory is freed after creating targets
	 */
	snic->disc.rtgt_cnt = tgt_cnt;
	snic->disc.rtgt_info = (u8 *) tgtid;
	queue_work(snic_glob->event_q, &snic->tgt_work);
	ret = 0;
end:
	/* Unmap Response Buffer */
	snic_pci_unmap_rsp_buf(snic, rqi);
	/* On the no-target path the buffer is not handed off, free it now */
	if (ret)
		kfree(tgtid);
	rqi->sge_va = 0;
	snic_release_untagged_req(snic, rqi);
	return ret;
} /* end of snic_report_tgt_cmpl_handler */
/* Discovery init fn */
void
snic_disc_init(struct snic_disc *disc)
{
	INIT_LIST_HEAD(&disc->tgt_list);
	mutex_init(&disc->mutex);

	/* No discovery data yet; everything starts zeroed */
	disc->state = SNIC_DISC_INIT;
	disc->disc_id = 0;
	disc->nxt_tgt_id = 0;
	disc->req_cnt = 0;
	disc->rtgt_cnt = 0;
	disc->rtgt_info = NULL;
	disc->cb = NULL;
} /* end of snic_disc_init */
/* Discovery, uninit fn */
void
snic_disc_term(struct snic *snic)
{
	struct snic_disc *disc = &snic->disc;

	/* Drop any queued re-discovery requests */
	mutex_lock(&disc->mutex);
	if (disc->req_cnt) {
		SNIC_SCSI_DBG(snic->shost, "Terminating Discovery.\n");
		disc->req_cnt = 0;
	}
	mutex_unlock(&disc->mutex);
}
/*
* snic_disc_start: Discovery Start ...
*/
int
snic_disc_start(struct snic *snic)
{
	struct snic_disc *disc = &snic->disc;
	int ret;

	SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n");

	mutex_lock(&disc->mutex);
	if (disc->state == SNIC_DISC_PENDING) {
		/* Already running; remember the request for a restart */
		disc->req_cnt++;
		mutex_unlock(&disc->mutex);

		return 0;
	}
	disc->state = SNIC_DISC_PENDING;
	mutex_unlock(&disc->mutex);

	ret = snic_queue_report_tgt_req(snic);
	if (ret)
		SNIC_HOST_INFO(snic->shost, "Discovery Failed, err=%d.\n", ret);

	return ret;
} /* end of snic_disc_start */
/*
* snic_disc_work :
*/
void
snic_handle_disc(struct work_struct *work)
{
	struct snic *snic = container_of(work, struct snic, disc_work);
	int ret = 0;

	SNIC_HOST_INFO(snic->shost, "disc_work: Discovery\n");

	ret = snic_disc_start(snic);
	/*
	 * Fix: the original "goto disc_err" fell through to the error log
	 * unconditionally, reporting "Discovery Failed w/ err = 0" even on
	 * success.  Only log when snic_disc_start() actually failed.
	 */
	if (ret)
		SNIC_HOST_ERR(snic->shost,
			      "disc_work: Discovery Failed w/ err = %d\n",
			      ret);
} /* end of snic_disc_work */
/*
* snic_tgt_del_all : cleanup all snic targets
* Called on unbinding the interface
*/
void
snic_tgt_del_all(struct snic *snic)
{
	struct snic_tgt *tgt = NULL;
	struct list_head *cur, *nxt;
	unsigned long flags;
	mutex_lock(&snic->disc.mutex);
	/* Unlink every target under the host lock and queue its deletion */
	spin_lock_irqsave(snic->shost->host_lock, flags);
	list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
		tgt = list_entry(cur, struct snic_tgt, list);
		tgt->state = SNIC_TGT_STAT_DEL;
		list_del_init(&tgt->list);
		SNIC_HOST_INFO(snic->shost, "Tgt %d q'ing for del\n", tgt->id);
		queue_work(snic_glob->event_q, &tgt->del_work);
		tgt = NULL;
	}
	spin_unlock_irqrestore(snic->shost->host_lock, flags);
	/* NOTE(review): del_work is queued on snic_glob->event_q but this
	 * flushes the shost workqueue -- confirm this waits for the right
	 * work items.
	 */
	scsi_flush_work(snic->shost);
	mutex_unlock(&snic->disc.mutex);
} /* end of snic_tgt_del_all */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_DISC_H
#define __SNIC_DISC_H
#include "snic_fwint.h"
/* Discovery state machine: INIT -> PENDING (request queued) -> DONE */
enum snic_disc_state {
	SNIC_DISC_NONE,
	SNIC_DISC_INIT,
	SNIC_DISC_PENDING,
	SNIC_DISC_DONE
};
struct snic;
struct snic_disc {
	struct list_head tgt_list;	/* list of discovered snic_tgt */
	enum snic_disc_state state;
	struct mutex mutex;	/* protects state, req_cnt, rtgt_* */
	u16	disc_id;
	u8	req_cnt;	/* discovery requests queued while pending */
	u32	nxt_tgt_id;	/* next SCSI target id to hand out */
	u32	rtgt_cnt;	/* number of entries in rtgt_info */
	u8	*rtgt_info;	/* firmware response: array of snic_tgt_id */
	struct delayed_work disc_timeout;
	void (*cb)(struct snic *);
};
#define SNIC_TGT_NAM_LEN	16
enum snic_tgt_state {
	SNIC_TGT_STAT_NONE,
	SNIC_TGT_STAT_INIT,
	SNIC_TGT_STAT_ONLINE,	/* Target is Online */
	SNIC_TGT_STAT_OFFLINE,	/* Target is Offline */
	SNIC_TGT_STAT_DEL,
};
struct snic_tgt_priv {
	struct list_head list;
	enum snic_tgt_type typ;
	u16	disc_id;
	/* NOTE(review): array of 16 pointers -- likely intended to be
	 * "char name[SNIC_TGT_NAM_LEN]"; confirm before any change since
	 * it alters the struct layout.
	 */
	char	*name[SNIC_TGT_NAM_LEN];
	union {
		/*DAS Target specific info */
		/*SAN Target specific info */
		u8	dummmy;
	} u;
};
/* snic tgt flags */
#define SNIC_TGT_SCAN_PENDING	0x01	/* scan_work queued/running */
struct snic_tgt {
	struct list_head list;	/* linked on snic_disc.tgt_list */
	u16	id;		/* firmware-assigned target id */
	u16	channel;
	u32	flags;
	u32	scsi_tgt_id;	/* SCSI midlayer target id */
	enum snic_tgt_state state;
	struct device dev;	/* refcounted; released by snic_tgt_dev_release */
	struct work_struct scan_work;
	struct work_struct del_work;
	struct snic_tgt_priv tdata;
};
struct snic_fw_req;
/* Discovery entry points (implemented in snic_disc.c) */
void snic_disc_init(struct snic_disc *);
int snic_disc_start(struct snic *);
void snic_disc_term(struct snic *);
int snic_report_tgt_cmpl_handler(struct snic *, struct snic_fw_req *);
int snic_tgtinfo_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq);
void snic_process_report_tgts_rsp(struct work_struct *);
void snic_handle_tgt_disc(struct work_struct *);
void snic_handle_disc(struct work_struct *);
void snic_tgt_dev_release(struct device *);
void snic_tgt_del_all(struct snic *);
#define dev_to_tgt(d) \
	container_of(d, struct snic_tgt, dev)
/* True iff @dev is a snic target device (identified by its release fn) */
static inline int
is_snic_target(struct device *dev)
{
	return dev->release == snic_tgt_dev_release;
}
/* Safe downcast: NULL when the starget's parent isn't a snic target */
#define starget_to_tgt(st)	\
	(is_snic_target(((struct scsi_target *) st)->dev.parent) ? \
		dev_to_tgt(st->dev.parent) : NULL)
#define snic_tgt_to_shost(t)	\
	dev_to_shost(t->dev.parent)
/* Returns 0 when the target is online, else a DID_NO_CONNECT host byte. */
static inline int
snic_tgt_chkready(struct snic_tgt *tgt)
{
	return (tgt->state == SNIC_TGT_STAT_ONLINE) ?
		0 : (DID_NO_CONNECT << 16);
}
const char *snic_tgt_state_to_str(int);
/* Aborts all outstanding IOs on @tgt; implemented in snic_scsi.c */
int snic_tgt_scsi_abort_io(struct snic_tgt *);
#endif /* end of __SNIC_DISC_H */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_FWINT_H
#define __SNIC_FWINT_H
#define SNIC_CDB_LEN	32 /* SCSI CDB size 32, can be used for 16 bytes */
#define LUN_ADDR_LEN	8
/*
 * Command entry type
 *
 * NOTE: each response code is its request code + 0x10
 * (e.g. REPORT_TGTS 0x2 -> REPORT_TGTS_CMPL 0x12).
 */
enum snic_io_type {
	/*
	 * Initiator request types
	 */
	SNIC_REQ_REPORT_TGTS = 0x2,	/* Report Targets */
	SNIC_REQ_ICMND,			/* Initiator command for SCSI IO */
	SNIC_REQ_ITMF,			/* Initiator command for Task Mgmt */
	SNIC_REQ_HBA_RESET,		/* SNIC Reset */
	SNIC_REQ_EXCH_VER,		/* Exchange Version Information */
	SNIC_REQ_TGT_INFO,		/* Backend/Target Information */
	SNIC_REQ_BOOT_LUNS,
	/*
	 * Response type
	 */
	SNIC_RSP_REPORT_TGTS_CMPL = 0x12,/* Report Targets Completion */
	SNIC_RSP_ICMND_CMPL,		/* SCSI IO Completion */
	SNIC_RSP_ITMF_CMPL,		/* Task Management Completion */
	SNIC_RSP_HBA_RESET_CMPL,	/* SNIC Reset Completion */
	SNIC_RSP_EXCH_VER_CMPL,		/* Exchange Version Completion*/
	SNIC_RSP_BOOT_LUNS_CMPL,
	/*
	 * Misc Request types
	 */
	SNIC_MSG_ACK = 0x80,		/* Ack: snic_notify_msg */
	SNIC_MSG_ASYNC_EVNOTIFY,	/* Asynchronous Event Notification */
}; /* end of enum snic_io_type */
/*
* Header status codes from firmware
*/
enum snic_io_status {
	SNIC_STAT_IO_SUCCESS = 0,	/* request was successful */
	/*
	 * If a request to the fw is rejected, the original request header
	 * will be returned with the status set to one of the following:
	 */
	SNIC_STAT_INVALID_HDR,	/* header contains invalid data */
	SNIC_STAT_OUT_OF_RES,	/* out of resources to complete request */
	SNIC_STAT_INVALID_PARM,	/* some parameter in request is not valid */
	SNIC_STAT_REQ_NOT_SUP,	/* req type is not supported */
	SNIC_STAT_IO_NOT_FOUND,	/* requested IO was not found */
	/*
	 * Once a request is processed, the fw will usually return
	 * a cmpl message type. In cases where errors occurred,
	 * the header status would be filled in with one of the following:
	 */
	SNIC_STAT_ABORTED,		/* req was aborted */
	SNIC_STAT_TIMEOUT,		/* req was timed out */
	SNIC_STAT_SGL_INVALID,		/* req was aborted due to sgl error */
	SNIC_STAT_DATA_CNT_MISMATCH,	/*recv/sent more/less data than expec */
	SNIC_STAT_FW_ERR,		/* req was terminated due to fw error */
	SNIC_STAT_ITMF_REJECT,		/* itmf req was rejected by target */
	SNIC_STAT_ITMF_FAIL,		/* itmf req was failed */
	SNIC_STAT_ITMF_INCORRECT_LUN,	/* itmf req has incorrect LUN id*/
	SNIC_STAT_CMND_REJECT,		/* req was invalid and rejected */
	SNIC_STAT_DEV_OFFLINE,		/* req sent to offline device */
	SNIC_STAT_NO_BOOTLUN,
	SNIC_STAT_SCSI_ERR,		/* SCSI error returned by Target. */
	SNIC_STAT_NOT_READY,		/* sNIC Subsystem is not ready */
	SNIC_STAT_FATAL_ERROR,		/* sNIC is in unrecoverable state */
}; /* end of enum snic_io_status */
/*
 * snic_io_hdr : host <--> firmware
*
* for any other message that will be queued to firmware should
* have the following request header
*/
struct snic_io_hdr {
	__le32	hid;
	__le32	cmnd_id;	/* tag here */
	/* NOTE(review): ulong is pointer-sized, so this wire layout differs
	 * between 32- and 64-bit hosts -- confirm firmware expectation.
	 */
	ulong	init_ctx;	/* initiator context */
	u8	type;		/* request/response type */
	u8	status;		/* header status entry */
	u8	protocol;	/* Protocol specific, may needed for RoCE*/
	u8	flags;
	__le16	sg_cnt;
	u16	resvd;
};
/* auxiliary function for encoding the snic_io_hdr */
static inline void
snic_io_hdr_enc(struct snic_io_hdr *hdr, u8 typ, u8 status, u32 id, u32 hid,
		u16 sg_cnt, ulong ctx)
{
	/* Multi-byte fields go out little-endian; init_ctx stays native */
	hdr->hid = cpu_to_le32(hid);
	hdr->cmnd_id = cpu_to_le32(id);
	hdr->sg_cnt = cpu_to_le16(sg_cnt);
	hdr->init_ctx = ctx;

	hdr->type = typ;
	hdr->status = status;
	hdr->protocol = 0;
	hdr->flags = 0;
}
/* auxiliary function for decoding the snic_io_hdr */
static inline void
snic_io_hdr_dec(struct snic_io_hdr *hdr, u8 *typ, u8 *stat, u32 *cmnd_id,
		u32 *hid, ulong *ctx)
{
	/* Inverse of snic_io_hdr_enc for the fields the host consumes */
	*hid = le32_to_cpu(hdr->hid);
	*cmnd_id = le32_to_cpu(hdr->cmnd_id);
	*ctx = hdr->init_ctx;
	*typ = hdr->type;
	*stat = hdr->status;
}
/*
* snic_host_info: host -> firmware
*
* Used for sending host information to firmware, and request fw version
*/
struct snic_exch_ver_req {
	__le32	drvr_ver;	/* for debugging, when fw dump captured */
	__le32	os_type;	/* for OS specific features */
};
/*
 * os_type flags
 * Bit 0-7 : OS information
 * Bit 8-31: Feature/Capability Information
 */
#define SNIC_OS_LINUX	0x1
#define SNIC_OS_WIN	0x2
#define SNIC_OS_ESX	0x3
/*
 * HBA Capabilities
 * Bit 1: Reserved.
 * Bit 2: Dynamic Discovery of LUNs.
 * Bit 3: Async event notifications on tgt online/offline events.
 * Bit 4: IO timeout support in FW.
 * Bit 5-31: Reserved.
 */
#define SNIC_HBA_CAP_DDL	0x02	/* Supports Dynamic Discovery of LUNs */
#define SNIC_HBA_CAP_AEN	0x04	/* Supports Async Event Notification */
#define SNIC_HBA_CAP_TMO	0x08	/* Supports IO timeout in FW */
/*
 * snic_exch_ver_rsp : firmware -> host
 *
 * Used by firmware to send response to version request
 */
struct snic_exch_ver_rsp {
	__le32	version;
	__le32	hid;		/* host id assigned by fw; echoed in requests */
	__le32	max_concur_ios;		/* max concurrent ios */
	__le32	max_sgs_per_cmd;	/* max sgls per IO */
	__le32	max_io_sz;		/* max io size supported */
	__le32	hba_cap;		/* hba capabilities */
	__le32	max_tgts;		/* max tgts supported */
	__le16	io_timeout;		/* FW extended timeout */
	u16	rsvd;
};
/*
* snic_report_tgts : host -> firmware request
*
* Used by the host to request list of targets
*/
struct snic_report_tgts {
	__le16	sg_cnt;
	__le16	flags;		/* specific flags from fw */
	u8	_resvd[4];
	__le64	sg_addr;	/* Points to SGL */
	__le64	sense_addr;
};
/* Adapter personality */
enum snic_type {
	SNIC_NONE = 0x0,
	SNIC_DAS,
	SNIC_SAN,
};
/* Report Target Response */
enum snic_tgt_type {
	SNIC_TGT_NONE = 0x0,
	SNIC_TGT_DAS,	/* DAS Target */
	SNIC_TGT_SAN,	/* SAN Target */
};
/* target id format */
struct snic_tgt_id {
	__le32	tgt_id;		/* target id */
	__le16	tgt_type;	/* tgt type */
	__le16	vnic_id;	/* corresponding vnic id */
};
/*
 * snic_report_tgts_cmpl : firmware -> host response
 *
 * Used by firmware to send response to Report Targets request
 */
struct snic_report_tgts_cmpl {
	__le32	tgt_cnt;	/* Number of Targets accessible */
	u32	_resvd;
};
/*
* Command flags
*
* Bit 0: Read flags
* Bit 1: Write flag
* Bit 2: ESGL - sg/esg array contains extended sg
* ESGE - is a host buffer contains sg elements
* Bit 3-4: Task Attributes
* 00b - simple
* 01b - head of queue
* 10b - ordered
* Bit 5-7: Priority - future use
* Bit 8-15: Reserved
*/
#define SNIC_ICMND_WR		0x01	/* write command */
#define SNIC_ICMND_RD		0x02	/* read command */
#define SNIC_ICMND_ESGL		0x04	/* SGE/ESGE array contains valid data*/
/*
 * Priority/Task Attribute settings
 */
#define SNIC_ICMND_TSK_SHIFT		2	/* task attr starts at bit 2 */
/* NOTE(review): "& ~(0xffff)" keeps only bits >= 16 of the shifted value,
 * which looks inverted for a 2-bit task-attribute field; also the block
 * comment above says bits 3-4 while the shift is 2 -- verify intent and
 * actual users before relying on this macro.
 */
#define SNIC_ICMND_TSK_MASK(x)		((x>>SNIC_ICMND_TSK_SHIFT) & ~(0xffff))
#define SNIC_ICMND_TSK_SIMPLE		0	/* simple task attr */
#define SNIC_ICMND_TSK_HEAD_OF_QUEUE	1	/* head of queue task attr */
#define SNIC_ICMND_TSK_ORDERED		2	/* ordered task attr */
#define SNIC_ICMND_PRI_SHIFT		5	/* prio val starts at bit 5 */
/*
* snic_icmnd : host-> firmware request
*
* used for sending out an initiator SCSI 16/32-byte command
*/
struct snic_icmnd {
	__le16	sg_cnt;		/* Number of SG Elements */
	__le16	flags;		/* flags */
	__le32	sense_len;	/* Sense buffer length */
	__le64	tgt_id;		/* Destination Target ID */
	__le64	lun_id;		/* Destination LUN ID */
	u8	cdb_len;
	u8	_resvd;
	__le16	time_out;	/* ms time for Res allocations fw to handle io*/
	__le32	data_len;	/* Total number of bytes to be transferred */
	u8	cdb[SNIC_CDB_LEN];
	__le64	sg_addr;	/* Points to SG List */
	__le64	sense_addr;	/* Sense buffer address */
};
/* Response flags */
/* Bit 0: Under run
 * Bit 1: Over Run
 * Bit 2-7: Reserved
 */
#define SNIC_ICMND_CMPL_UNDR_RUN	0x01	/* resid under and valid */
#define SNIC_ICMND_CMPL_OVER_RUN	0x02	/* resid over and valid */
/*
 * snic_icmnd_cmpl: firmware -> host response
 *
 * Used for sending the host a response to an icmnd (initiator command)
 */
struct snic_icmnd_cmpl {
	u8	scsi_status;	/* value as per SAM */
	u8	flags;		/* SNIC_ICMND_CMPL_* bits above */
	__le16	sense_len;	/* Sense Length */
	__le32	resid;		/* Residue : # bytes under or over run */
};
/*
 * snic_itmf: host->firmware request
 *
 * used for requesting the firmware to abort a request and/or send out
 * a task management function
 *
 * the req_id field is valid in case of abort task and clear task
 */
struct snic_itmf {
	u8 tm_type;	/* SCSI Task Management request: enum snic_itmf_tm_type */
	u8 resvd;
	__le16 flags;	/* flags */
	__le32 req_id;	/* Command id of snic req to be aborted */
	__le64 tgt_id;	/* Target ID */
	__le64 lun_id;	/* Destination LUN ID */
	__le16 timeout;	/* in sec */
};

/*
 * Task Management Request
 */
enum snic_itmf_tm_type {
	SNIC_ITMF_ABTS_TASK = 0x01,	/* Abort Task */
	SNIC_ITMF_ABTS_TASK_SET,	/* Abort Task Set */
	SNIC_ITMF_CLR_TASK,		/* Clear Task */
	SNIC_ITMF_CLR_TASKSET,		/* Clear Task Set */
	SNIC_ITMF_LUN_RESET,		/* Lun Reset */
	SNIC_ITMF_ABTS_TASK_TERM,	/* Supported for SAN Targets */
};

/*
 * snic_itmf_cmpl: firmware -> host response
 *
 * used for sending the host a response for a itmf request
 */
struct snic_itmf_cmpl {
	__le32 nterminated;	/* # IOs terminated as a result of tmf */
	u8 flags;		/* flags: SNIC_NUM_TERM_VALID below */
	u8 _resvd[3];
};

/*
 * itmfl_cmpl flags
 * Bit 0 : 1 - Num terminated field valid
 * Bit 1 - 7 : Reserved
 */
#define SNIC_NUM_TERM_VALID	0x01	/* Number of IOs terminated */
/*
 * snic_hba_reset: host -> firmware request
 *
 * used for requesting firmware to reset snic
 */
struct snic_hba_reset {
	__le16 flags;	/* flags */
	u8 _resvd[6];
};

/*
 * snic_hba_reset_cmpl: firmware -> host response
 *
 * Used by firmware to respond to the host's hba reset request
 */
struct snic_hba_reset_cmpl {
	u8 flags;	/* flags : more info needs to be added*/
	u8 _resvd[7];
};

/*
 * snic_notify_msg: firmware -> host response
 *
 * Used by firmware to notify host of the last work queue entry received
 */
struct snic_notify_msg {
	__le32 wqe_num;	/* wq entry number */
	u8 flags;	/* flags, macros */
	u8 _resvd[4];
};
#define SNIC_EVDATA_LEN		24	/* in bytes */
/* snic_async_evnotify: firmware -> host notification
 *
 * Used by firmware to notify the host about configuration/state changes
 */
struct snic_async_evnotify {
	u8 FLS_EVENT_DESC;	/* NOTE(review): unusual upper-case field name;
				 * kept as-is, it is part of the fw interface */
	u8 vnic;		/* vnic id */
	u8 _resvd[2];
	__le32 ev_id;		/* Event ID: enum snic_ev_type below */
	u8 ev_data[SNIC_EVDATA_LEN]; /* Event Data */
	u8 _resvd2[4];
};

/* async event flags */
enum snic_ev_type {
	SNIC_EV_TGT_OFFLINE = 0x01, /* Target Offline, PL contains TGT ID */
	SNIC_EV_TGT_ONLINE,	/* Target Online, PL contains TGT ID */
	SNIC_EV_LUN_OFFLINE,	/* LUN Offline, PL contains LUN ID */
	SNIC_EV_LUN_ONLINE,	/* LUN Online, PL contains LUN ID */
	SNIC_EV_CONF_CHG,	/* Dev Config/Attr Change Event */
	SNIC_EV_TGT_ADDED,	/* Target Added */
	SNIC_EV_TGT_DELTD,	/* Target Del'd, PL contains TGT ID */
	SNIC_EV_LUN_ADDED,	/* LUN Added */
	SNIC_EV_LUN_DELTD,	/* LUN Del'd, PL cont. TGT & LUN ID */

	SNIC_EV_DISC_CMPL = 0x10, /* Discovery Completed Event */
};
#define SNIC_HOST_REQ_LEN	128	/*Exp length of host req, wq desc sz*/
/* Payload 88 bytes = 128 - 24 - 16 */
#define SNIC_HOST_REQ_PAYLOAD	((int)(SNIC_HOST_REQ_LEN -	\
					sizeof(struct snic_io_hdr) -	\
					(2 * sizeof(u64))))

/*
 * snic_host_req: host -> firmware request
 *
 * Basic structure for all snic requests that are sent from the host to
 * firmware. They are 128 bytes in size.
 */
struct snic_host_req {
	u64 ctrl_data[2];	/*16 bytes - Control Data */
	struct snic_io_hdr hdr;	/* common header; hdr.init_ctx links back to
				 * the owning snic_req_info (see req_to_rqi) */
	union {
		/*
		 * Entry specific space, last byte contains color
		 */
		u8 buf[SNIC_HOST_REQ_PAYLOAD];

		/*
		 * Exchange firmware version
		 */
		struct snic_exch_ver_req exch_ver;

		/* report targets */
		struct snic_report_tgts rpt_tgts;

		/* io request */
		struct snic_icmnd icmnd;

		/* task management request */
		struct snic_itmf itmf;

		/* hba reset */
		struct snic_hba_reset reset;
	} u;
}; /* end of snic_host_req structure */
#define SNIC_FW_REQ_LEN		64 /* Expected length of fw req */
/*
 * snic_fw_req: firmware -> host request/response
 *
 * Basic structure for all completions and notifications coming from the
 * firmware. The last byte of the entry carries the color bit (see
 * snic_color_enc/snic_color_dec).
 */
struct snic_fw_req {
	struct snic_io_hdr hdr;
	union {
		/*
		 * Entry specific space, last byte contains color
		 */
		u8 buf[SNIC_FW_REQ_LEN - sizeof(struct snic_io_hdr)];

		/* Exchange Version Response */
		struct snic_exch_ver_rsp exch_ver_cmpl;

		/* Report Targets Response */
		struct snic_report_tgts_cmpl rpt_tgts_cmpl;

		/* scsi response */
		struct snic_icmnd_cmpl icmnd_cmpl;

		/* task management response */
		struct snic_itmf_cmpl itmf_cmpl;

		/* hba reset response */
		struct snic_hba_reset_cmpl reset_cmpl;

		/* notify message */
		struct snic_notify_msg ack;

		/* async notification event */
		struct snic_async_evnotify async_ev;

	} u;
}; /* end of snic_fw_req structure */

/*
 * Auxiliary macros to verify specific snic req/cmpl structures
 * to ensure that it will be aligned to 64 bit, and not using
 * color bit field
 * (currently empty placeholders)
 */
#define VERIFY_REQ_SZ(x)
#define VERIFY_CMPL_SZ(x)
/*
 * Access routines to encode and decode the color bit, which is the most
 * significant bit of the structure.
 */

/*
 * snic_color_enc - write the color bit into the last byte of @req.
 * The MSB of the final byte is set when @color is non-zero and cleared
 * otherwise; the remaining seven bits of that byte are preserved.
 */
static inline void
snic_color_enc(struct snic_fw_req *req, u8 color)
{
	u8 *last_byte = (u8 *)req + sizeof(struct snic_fw_req) - 1;

	*last_byte = (u8)((*last_byte & 0x7f) | (color ? 0x80 : 0x00));
}
/*
 * snic_color_dec - read the color bit (MSB of the entry's last byte)
 * out of @req into *@color, then issue a read barrier.
 */
static inline void
snic_color_dec(struct snic_fw_req *req, u8 *color)
{
	u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1;

	*color = *c >> 7;

	/* Make sure color bit is read from desc *before* other fields
	 * are read from desc. Hardware guarantees color bit is last
	 * bit (byte) written. Adding the rmb() prevents the compiler
	 * and/or CPU from reordering the reads which would potentially
	 * result in reading stale values.
	 */
	rmb();
}
#endif /* end of __SNIC_FWINT_H */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>
#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"
/*
 * snic_wq_cmpl_frame_send - per-buffer WQ completion callback.
 *
 * Invoked by svnic_wq_service() for each acknowledged work queue entry:
 * logs the ack, traces the owning request, and unmaps the DMA mapping
 * set up when the request was queued. Freeing of the request itself is
 * done elsewhere (snic_free_wq_buf / IO completion path).
 */
static void
snic_wq_cmpl_frame_send(struct vnic_wq *wq,
			    struct cq_desc *cq_desc,
			    struct vnic_wq_buf *buf,
			    void *opaque)
{
	struct snic *snic = svnic_dev_priv(wq->vdev);

	SNIC_BUG_ON(buf->os_buf == NULL);

	if (snic_log_level & SNIC_DESC_LOGGING)
		SNIC_HOST_INFO(snic->shost,
			       "Ack received for snic_host_req %p.\n",
			       buf->os_buf);

	SNIC_TRC(snic->shost->host_no, 0, 0,
		 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
		 0);
	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
	buf->os_buf = NULL;
}
/*
 * snic_wq_cmpl_handler_cont - CQ service continuation for WQ completions.
 *
 * Called by svnic_cq_service() per completed CQ entry; services the
 * corresponding WQ under wq_lock, handing each buffer to
 * snic_wq_cmpl_frame_send(). Only queue 0 is valid (single-WQ hardware).
 * Always returns 0.
 */
static int
snic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
			  struct cq_desc *cq_desc,
			  u8 type,
			  u16 q_num,
			  u16 cmpl_idx,
			  void *opaque)
{
	struct snic *snic = svnic_dev_priv(vdev);
	unsigned long flags;

	SNIC_BUG_ON(q_num != 0);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	svnic_wq_service(&snic->wq[q_num],
			 cq_desc,
			 cmpl_idx,
			 snic_wq_cmpl_frame_send,
			 NULL);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	return 0;
} /* end of snic_cmpl_handler_cont */
/*
 * snic_wq_cmpl_handler - service WQ completion queues.
 *
 * Records the ack time, then drains up to @work_to_do completions from
 * every WQ-paired CQ via svnic_cq_service(), which dispatches each entry
 * to snic_wq_cmpl_handler_cont(). Returns the total number of
 * completions processed.
 */
int
snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
{
	unsigned int total_done = 0;
	unsigned int q;

	snic->s_stats.misc.last_ack_time = jiffies;

	for (q = 0; q < snic->wq_count; q++)
		total_done += svnic_cq_service(&snic->cq[q],
					       work_to_do,
					       snic_wq_cmpl_handler_cont,
					       NULL);

	return total_done;
} /* end of snic_wq_cmpl_handler */
/*
 * snic_free_wq_buf - release a WQ buffer during queue teardown.
 *
 * Unmaps the request's DMA mapping, unlinks the owning snic_req_info
 * from the untagged (spl_cmd) list if it is still queued, frees any
 * attached response SGL buffer, and returns the request to its mempool.
 * If the rqi is no longer on the list, another path already owns the
 * free and we bail out.
 */
void
snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct snic_host_req *req = buf->os_buf;
	struct snic *snic = svnic_dev_priv(wq->vdev);
	struct snic_req_info *rqi = NULL;
	unsigned long flags;

	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);

	rqi = req_to_rqi(req);
	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		/* not (or no longer) on spl_cmd_list: nothing more to do */
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
		goto end;
	}

	SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

	if (rqi->sge_va) {
		snic_pci_unmap_rsp_buf(snic, rqi);
		kfree((void *)rqi->sge_va);
		rqi->sge_va = 0;
	}
	snic_req_free(snic, rqi);
	SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");

end:
	return;
}
/* Criteria to select work queue in multi queue mode */
static int
snic_select_wq(struct snic *snic)
{
	/* No multi queue support for now: always queue 0. The build
	 * assertion forces this function to be revisited if SNIC_WQ_MAX
	 * ever grows beyond one. */
	BUILD_BUG_ON(SNIC_WQ_MAX > 1);

	return 0;
}
/*
 * snic_queue_wq_desc - DMA-map a host request and post it on a work queue.
 *
 * Maps @os_buf (@len bytes) for device access, selects a WQ, and posts
 * the descriptor under wq_lock. On mapping failure or a full WQ the
 * mapping is released and -ENOMEM is returned; on success the active
 * request stats are updated and 0 is returned.
 */
int
snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
{
	dma_addr_t pa = 0;
	unsigned long flags;
	struct snic_fw_stats *fwstats = &snic->s_stats.fw;
	long act_reqs;
	int q_num = 0;

	snic_print_desc(__func__, os_buf, len);

	/* Map request buffer */
	pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(snic->pdev, pa)) {
		SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");

		return -ENOMEM;
	}

	q_num = snic_select_wq(snic);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	/*
	 * Check space on the queue actually selected. The previous code
	 * checked snic->wq (i.e. wq[0]) unconditionally, which would be
	 * wrong once snic_select_wq() returns a non-zero queue; today
	 * q_num is always 0, so behavior is unchanged.
	 */
	if (!svnic_wq_desc_avail(&snic->wq[q_num])) {
		pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
		SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);

		return -ENOMEM;
	}

	snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	/* Update stats: track the high-water mark of in-flight fw requests */
	act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
	if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
		atomic64_set(&fwstats->max_actv_reqs, act_reqs);

	return 0;
} /* end of snic_queue_wq_desc() */
/*
 * snic_handle_untagged_req: Adds snic specific requests to spl_cmd_list.
 * Purpose : Used during driver unload to clean up the requests.
 */
void
snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	/* self-init so list_empty() checks elsewhere are well-defined */
	INIT_LIST_HEAD(&rqi->list);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_add_tail(&rqi->list, &snic->spl_cmd_list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}
/*
 * snic_req_init:
 * Allocates snic_req_info + snic_host_req + sgl data, and initializes.
 *
 * Picks the default- or max-SGL mempool based on @sg_cnt, zeroes the
 * allocation, and lays the snic_host_req immediately after the
 * snic_req_info (the pools are sized for this layout). hdr.init_ctx is
 * pre-set so req_to_rqi() works before the request is fully encoded.
 * Returns NULL on allocation failure (alloc_fail stat is bumped).
 */
struct snic_req_info *
snic_req_init(struct snic *snic, int sg_cnt)
{
	u8 typ;
	struct snic_req_info *rqi = NULL;

	typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
		SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;

	rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.alloc_fail);
		SNIC_HOST_ERR(snic->shost,
			      "Failed to allocate memory from snic req pool id = %d\n",
			      typ);
		return rqi;
	}

	memset(rqi, 0, sizeof(*rqi));
	rqi->rq_pool_type = typ;
	rqi->start_time = jiffies;
	/* host req is carved out of the same allocation, right after rqi
	 * (was assigned twice in the original code; once is enough) */
	rqi->req = (struct snic_host_req *) (rqi + 1);
	rqi->req_len = sizeof(struct snic_host_req);
	rqi->snic = snic;

	if (sg_cnt == 0)
		goto end;

	rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc));

	if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
		atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);

	SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT);
	atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);

end:
	memset(rqi->req, 0, rqi->req_len);

	/* pre initialization of init_ctx to support req_to_rqi */
	rqi->req->hdr.init_ctx = (ulong) rqi;

	SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocated.\n", rqi);

	return rqi;
} /* end of snic_req_init */
/*
 * snic_abort_req_init : Inits abort request.
 *
 * Allocates (or reuses) the per-rqi abort request from the TM mempool,
 * zeroes it, and pre-sets hdr.init_ctx so req_to_rqi() works on it.
 * Returns NULL on allocation failure.
 */
struct snic_host_req *
snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	/* If abort to be issued second time, then reuse */
	if (rqi->abort_req)
		return rqi->abort_req;


	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	rqi->abort_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_abort_req_init */
/*
 * snic_dr_req_init : Inits device reset req
 *
 * Allocates the per-rqi device-reset request from the TM mempool (must
 * not already exist, unlike the reusable abort request), zeroes it, and
 * pre-sets hdr.init_ctx. Returns NULL on allocation failure.
 */
struct snic_host_req *
snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	SNIC_BUG_ON(rqi->dr_req != NULL);
	rqi->dr_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_dr_req_init */
/* frees snic_req_info and snic_host_req
 *
 * Returns the abort and device-reset TM requests (if allocated) to the
 * TM mempool and the rqi itself (which embeds the main host req) to the
 * pool it was allocated from. Caller must have released sge_va first.
 */
void
snic_req_free(struct snic *snic, struct snic_req_info *rqi)
{
	SNIC_BUG_ON(rqi->req == rqi->abort_req);
	SNIC_BUG_ON(rqi->req == rqi->dr_req);
	SNIC_BUG_ON(rqi->sge_va != 0);

	SNIC_SCSI_DBG(snic->shost,
		      "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
		      rqi, rqi->req, rqi->abort_req, rqi->dr_req);

	if (rqi->abort_req)
		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);

	if (rqi->dr_req)
		mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);

	mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}
/*
 * snic_pci_unmap_rsp_buf - unmap the response buffer referenced by the
 * first SG element of the request's SGL (addr/len are little-endian on
 * the wire, hence the le*_to_cpu conversions).
 */
void
snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_sg_desc *sgd;

	sgd = req_to_sgl(rqi_to_req(rqi));
	SNIC_BUG_ON(sgd[0].addr == 0);
	pci_unmap_single(snic->pdev,
			 le64_to_cpu(sgd[0].addr),
			 le32_to_cpu(sgd[0].len),
			 PCI_DMA_FROMDEVICE);
}
/*
 * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them.
 *
 * Used at teardown: under spl_cmd_lock, unlinks every queued rqi, frees
 * its response buffer (if mapped) and returns it to its mempool.
 */
void
snic_free_all_untagged_reqs(struct snic *snic)
{
	struct snic_req_info *rqi;
	struct list_head *cur, *nxt;
	unsigned long flags;

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
		rqi = list_entry(cur, struct snic_req_info, list);
		list_del_init(&rqi->list);
		if (rqi->sge_va) {
			snic_pci_unmap_rsp_buf(snic, rqi);
			kfree((void *)rqi->sge_va);
			rqi->sge_va = 0;
		}

		snic_req_free(snic, rqi);
	}
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}
/*
 * snic_release_untagged_req : Unlinks the untagged req and frees it.
 *
 * Skips the free when the driver is being removed (teardown frees
 * everything via snic_free_all_untagged_reqs) or when the rqi is no
 * longer on the list (someone else already released it).
 */
void
snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		goto end;
	}
	/* NOTE(review): in_remove is re-checkable only under snic_lock;
	 * there is a small window between dropping it and taking
	 * spl_cmd_lock — presumed benign, confirm against removal path. */
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
		goto end;
	}
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
	snic_req_free(snic, rqi);

end:
	return;
}
/* dump buf in hex fmt
 *
 * @pfx: prefix string for each printed line
 * @data/@len: buffer to dump via print_hex_dump_bytes()
 */
void
snic_hex_dump(char *pfx, char *data, int len)
{
	SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len);
	print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len);
}
#define	LINE_BUFSZ	128	/* for snic_print_desc fn */

/*
 * snic_dump_desc - log a one-line decode of a host/fw descriptor.
 *
 * @fn: caller name for the log line; @os_buf/@len: raw descriptor.
 * The buffer is interpreted as either a snic_host_req (host->fw types)
 * or snic_fw_req (fw->host types, i.e. hdr.type >= RSP_REPORT_TGTS_CMPL)
 * and the owning rqi is recovered from hdr.init_ctx. With the 0x20 log
 * level bit set, the raw bytes are hex-dumped as well.
 */
static void
snic_dump_desc(const char *fn, char *os_buf, int len)
{
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf;
	struct snic_req_info *rqi = NULL;
	char line[LINE_BUFSZ] = { '\0' };
	char *cmd_str = NULL;

	/* fw->host entries carry init_ctx in the fw header layout */
	if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
		rqi = (struct snic_req_info *) fwreq->hdr.init_ctx;
	else
		rqi = (struct snic_req_info *) req->hdr.init_ctx;

	SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
	switch (req->hdr.type) {
	case SNIC_REQ_REPORT_TGTS:
		cmd_str = "report-tgt : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :");
		break;

	case SNIC_REQ_ICMND:
		cmd_str = "icmnd : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :",
			 req->u.icmnd.cdb[0]);
		break;

	case SNIC_REQ_ITMF:
		cmd_str = "itmf : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :");
		break;

	case SNIC_REQ_HBA_RESET:
		cmd_str = "hba reset :";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :");
		break;

	case SNIC_REQ_EXCH_VER:
		cmd_str = "exch ver : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :");
		break;

	case SNIC_REQ_TGT_INFO:
		/* no detail line for tgt info; line[] stays empty */
		cmd_str = "tgt info : ";
		break;

	case SNIC_RSP_REPORT_TGTS_CMPL:
		cmd_str = "report tgt cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :");
		break;

	case SNIC_RSP_ICMND_CMPL:
		cmd_str = "icmnd_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :",
			 rqi->req->u.icmnd.cdb[0]);
		break;

	case SNIC_RSP_ITMF_CMPL:
		cmd_str = "itmf_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :");
		break;

	case SNIC_RSP_HBA_RESET_CMPL:
		cmd_str = "hba_reset_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :");
		break;

	case SNIC_RSP_EXCH_VER_CMPL:
		cmd_str = "exch_ver_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :");
		break;

	case SNIC_MSG_ACK:
		cmd_str = "msg ack : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :");
		break;

	case SNIC_MSG_ASYNC_EVNOTIFY:
		cmd_str = "async notify : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :");
		break;

	default:
		cmd_str = "unknown : ";
		SNIC_BUG_ON(1);
		break;
	}

	SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n",
		  fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
		  req->hdr.init_ctx);

	/* Enable it, to dump byte stream */
	if (snic_log_level & 0x20)
		snic_hex_dump(cmd_str, os_buf, len);
} /* end of snic_dump_desc */
/*
 * snic_print_desc - log a descriptor decode when descriptor logging is
 * enabled in snic_log_level; thin gate around snic_dump_desc().
 */
void
snic_print_desc(const char *fn, char *os_buf, int len)
{
	if (snic_log_level & SNIC_DESC_LOGGING)
		snic_dump_desc(fn, os_buf, len);
}
/*
 * snic_calc_io_process_time - update the max-IO-latency statistic.
 *
 * Computes the jiffies elapsed since the request was allocated and
 * raises s_stats.io.max_time if this IO took longer than the current
 * recorded maximum.
 */
void
snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
{
	u64 elapsed = jiffies - rqi->start_time;

	if (elapsed > atomic64_read(&snic->s_stats.io.max_time))
		atomic64_set(&snic->s_stats.io.max_time, elapsed);
}
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _SNIC_IO_H
#define _SNIC_IO_H
#define SNIC_DFLT_SG_DESC_CNT	32	/* Default descriptors for sgl */
#define SNIC_MAX_SG_DESC_CNT	60	/* Max descriptor for sgl */
#define SNIC_SG_DESC_ALIGN	16	/* Descriptor address alignment */

/* SG descriptor for snic: addr/len are little-endian on the wire */
struct snic_sg_desc {
	__le64 addr;
	__le32 len;
	u32 _resvd;
};

/* fixed-size SGL shapes used to size the request mempools */
struct snic_dflt_sgl {
	struct snic_sg_desc sg_desc[SNIC_DFLT_SG_DESC_CNT];
};

struct snic_max_sgl {
	struct snic_sg_desc sg_desc[SNIC_MAX_SG_DESC_CNT];
};

enum snic_req_cache_type {
	SNIC_REQ_CACHE_DFLT_SGL = 0,	/* cache with default size sgl */
	SNIC_REQ_CACHE_MAX_SGL,		/* cache with max size sgl */
	SNIC_REQ_TM_CACHE,		/* cache for task mgmt reqs contains
					   snic_host_req objects only*/

	SNIC_REQ_MAX_CACHES		/* number of sgl caches */
};
/* Per IO internal state */
struct snic_internal_io_state {
	char	*rqi;		/* owning snic_req_info */
	u64	flags;
	u32	state;		/* enum snic_ioreq_state */
	u32	abts_status;	/* Abort completion status */
	u32	lr_status;	/* device reset completion status */
};

/* IO state machine */
enum snic_ioreq_state {
	SNIC_IOREQ_NOT_INITED = 0,
	SNIC_IOREQ_PENDING,
	SNIC_IOREQ_ABTS_PENDING,
	SNIC_IOREQ_ABTS_COMPLETE,
	SNIC_IOREQ_LR_PENDING,
	SNIC_IOREQ_LR_COMPLETE,
	SNIC_IOREQ_COMPLETE,
};
struct snic;
struct snic_host_req;

/*
 * snic_req_info : Contains info about IO, one per scsi command.
 * Notes: Make sure that the structure is aligned to 16 B
 * this helps in easy access to snic_req_info from snic_host_req
 */
struct snic_req_info {
	struct list_head list;		/* link on snic->spl_cmd_list */
	struct snic_host_req *req;	/* embedded req, laid out at rqi+1 */
	u64	start_time;		/* start time in jiffies */
	u16	rq_pool_type;		/* notion of request pool type */
	u16	req_len;		/* buf len passing to fw (req + sgl)*/
	u32	tgt_id;

	u32	tm_tag;
	u8	io_cmpl:1;		/* sets to 1 when fw completes IO */
	u8	resvd[3];
	struct scsi_cmnd *sc;		/* Associated scsi cmd */
	struct snic	*snic;		/* Associated snic */
	ulong	sge_va;			/* Pointer to Resp Buffer */
	u64	snsbuf_va;

	struct snic_host_req *abort_req;	/* TM-pool abort req, reusable */
	struct completion *abts_done;
	struct snic_host_req *dr_req;		/* TM-pool device-reset req */
	struct completion *dr_done;
};

/* converters between the co-located rqi / host-req / trailing SGL;
 * rely on req being at (rqi + 1) and hdr.init_ctx pointing back at rqi */
#define rqi_to_req(rqi)	\
	((struct snic_host_req *) (((struct snic_req_info *)rqi)->req))

#define req_to_rqi(req)	\
	((struct snic_req_info *) (((struct snic_host_req *)req)->hdr.init_ctx))

#define req_to_sgl(req)	\
	((struct snic_sg_desc *) (((struct snic_host_req *)req)+1))
/* allocate/initialize an rqi sized for sg_cnt SG elements */
struct snic_req_info *
snic_req_init(struct snic *, int sg_cnt);
/* return rqi and its TM sub-requests to their mempools */
void snic_req_free(struct snic *, struct snic_req_info *);
/* update max-IO-latency stat for a completed request */
void snic_calc_io_process_time(struct snic *, struct snic_req_info *);
/* unmap the response buffer referenced by the request's first SGE */
void snic_pci_unmap_rsp_buf(struct snic *, struct snic_req_info *);
/* allocate (or reuse) the abort TM request for an rqi */
struct snic_host_req *
snic_abort_req_init(struct snic *, struct snic_req_info *);
/* allocate the device-reset TM request for an rqi */
struct snic_host_req *
snic_dr_req_init(struct snic *, struct snic_req_info *);
#endif /* _SNIC_IO_H */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "snic_io.h"
#include "snic.h"
/*
 * snic_isr_msix_wq : MSIx ISR for work queue.
 *
 * Drains WQ completions via snic_wq_cmpl_handler() and returns the
 * consumed credits to the interrupt controller, unmasking the interrupt
 * and resetting its timer.
 */
static irqreturn_t
snic_isr_msix_wq(int irq, void *data)
{
	struct snic *snic = data;
	unsigned long wq_work_done = 0;

	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);

	wq_work_done = snic_wq_cmpl_handler(snic, -1);
	svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ],
				  wq_work_done,
				  1 /* unmask intr */,
				  1 /* reset intr timer */);

	return IRQ_HANDLED;
} /* end of snic_isr_msix_wq */
/*
 * snic_isr_msix_io_cmpl : MSIx ISR for IO completions.
 *
 * Drains firmware-to-host completion queue entries via
 * snic_fwcq_cmpl_handler() and returns the credits, unmasking the
 * interrupt and resetting its timer.
 */
static irqreturn_t
snic_isr_msix_io_cmpl(int irq, void *data)
{
	struct snic *snic = data;
	unsigned long iocmpl_work_done = 0;

	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);

	iocmpl_work_done = snic_fwcq_cmpl_handler(snic, -1);
	svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL],
				  iocmpl_work_done,
				  1 /* unmask intr */,
				  1 /* reset intr timer */);

	return IRQ_HANDLED;
} /* end of snic_isr_msix_io_cmpl */
/*
 * snic_isr_msix_err_notify : MSIx ISR for error/notification vector.
 *
 * Returns all credits on the error vector, logs any queue error status,
 * and processes link-state change notifications.
 */
static irqreturn_t
snic_isr_msix_err_notify(int irq, void *data)
{
	struct snic *snic = data;

	snic->s_stats.misc.last_isr_time = jiffies;
	atomic64_inc(&snic->s_stats.misc.isr_cnt);

	svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]);
	snic_log_q_error(snic);

	/*Handling link events */
	snic_handle_link_event(snic);

	return IRQ_HANDLED;
} /* end of snic_isr_msix_err_notify */
/*
 * snic_free_intr - release every IRQ that was successfully requested
 * (msix[i].requested flags set by snic_request_intr). MSI-X only.
 */
void
snic_free_intr(struct snic *snic)
{
	int i;

	/* ONLY interrupt mode MSIX is supported */
	for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
		if (snic->msix[i].requested) {
			free_irq(snic->msix_entry[i].vector,
				 snic->msix[i].devid);
		}
	}
} /* end of snic_free_intr */
/*
 * snic_request_intr - set up the three MSI-X vectors (WQ, IO completion,
 * error/notify) and request their IRQs.
 *
 * Device names are derived from snic->name (truncated to 11 chars so
 * the suffix fits the devname buffer). On any request_irq() failure all
 * previously requested vectors are freed and the error is returned.
 */
int
snic_request_intr(struct snic *snic)
{
	int ret = 0, i;
	enum vnic_dev_intr_mode intr_mode;

	intr_mode = svnic_dev_get_intr_mode(snic->vdev);
	SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);

	/*
	 * Currently HW supports single WQ and CQ. So passing devid as snic.
	 * When hardware supports multiple WQs and CQs, one idea is
	 * to pass devid as corresponding WQ or CQ ptr and retrieve snic
	 * from queue ptr.
	 * Except for err_notify, which is always one.
	 */
	sprintf(snic->msix[SNIC_MSIX_WQ].devname,
		"%.11s-scsi-wq",
		snic->name);
	snic->msix[SNIC_MSIX_WQ].isr = snic_isr_msix_wq;
	snic->msix[SNIC_MSIX_WQ].devid = snic;

	sprintf(snic->msix[SNIC_MSIX_IO_CMPL].devname,
		"%.11s-io-cmpl",
		snic->name);
	snic->msix[SNIC_MSIX_IO_CMPL].isr = snic_isr_msix_io_cmpl;
	snic->msix[SNIC_MSIX_IO_CMPL].devid = snic;

	sprintf(snic->msix[SNIC_MSIX_ERR_NOTIFY].devname,
		"%.11s-err-notify",
		snic->name);
	snic->msix[SNIC_MSIX_ERR_NOTIFY].isr = snic_isr_msix_err_notify;
	snic->msix[SNIC_MSIX_ERR_NOTIFY].devid = snic;

	for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
		ret = request_irq(snic->msix_entry[i].vector,
				  snic->msix[i].isr,
				  0,
				  snic->msix[i].devname,
				  snic->msix[i].devid);
		if (ret) {
			/* fixed typo in message: "requrest_irq" */
			SNIC_HOST_ERR(snic->shost,
				      "MSI-X: request_irq(%d) failed %d\n",
				      i,
				      ret);
			snic_free_intr(snic);
			break;
		}
		snic->msix[i].requested = 1;
	}

	return ret;
} /* end of snic_request_intr */
/*
 * snic_set_intr_mode - enable MSI-X interrupt mode.
 *
 * Requires n WQ vectors + m IO-completion CQ vectors + 1 error/notify
 * vector. On success the wq/cq/intr counts are clamped to the MSI-X
 * configuration and the vnic dev is put in MSIX mode; on failure the
 * mode is set to UNKNOWN and -EINVAL is returned (no INTx/MSI fallback).
 */
int
snic_set_intr_mode(struct snic *snic)
{
	unsigned int n = ARRAY_SIZE(snic->wq);
	unsigned int m = SNIC_CQ_IO_CMPL_MAX;
	unsigned int i;

	/*
	 * We need n WQs, m CQs, and n+m+1 INTRs
	 * (last INTR is used for WQ/CQ errors and notification area
	 */
	BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) >
			ARRAY_SIZE(snic->intr));
	SNIC_BUG_ON(ARRAY_SIZE(snic->msix_entry) < (n + m + 1));

	for (i = 0; i < (n + m + 1); i++)
		snic->msix_entry[i].entry = i;

	if (snic->wq_count >= n && snic->cq_count >= (n + m)) {
		if (!pci_enable_msix(snic->pdev,
				     snic->msix_entry,
				     (n + m + 1))) {
			snic->wq_count = n;
			snic->cq_count = n + m;
			snic->intr_count = n + m + 1;
			snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY;

			SNIC_ISR_DBG(snic->shost,
				     "Using MSI-X Interrupts\n");
			svnic_dev_set_intr_mode(snic->vdev,
						VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
} /* end of snic_set_intr_mode */
/*
 * snic_clear_intr_mode - disable MSI-X and fall back to the INTx mode
 * setting on the vnic dev (teardown counterpart of snic_set_intr_mode).
 */
void
snic_clear_intr_mode(struct snic *snic)
{
	pci_disable_msix(snic->pdev);

	svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_INTX);
}
/* 此差异已折叠。 ("This diff has been collapsed" — web-listing artifact; one full file of the original commit is omitted here.) */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "wq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_resource.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "snic.h"
/*
 * snic_get_vnic_config - read the vNIC config from the device and clamp
 * each field to its supported range.
 *
 * Each GET_CONFIG(m) fetches one field of struct vnic_snic_config via
 * svnic_dev_spec(); the first failure is logged and its error returned.
 * After fetching, values are clamped to the VNIC_SNIC_*_MIN/MAX limits
 * (wq descriptor count additionally aligned to 16) and logged.
 * Returns 0 on success.
 */
int
snic_get_vnic_config(struct snic *snic)
{
	struct vnic_snic_config *c = &snic->config;
	int ret;

#define GET_CONFIG(m) \
	do { \
		ret = svnic_dev_spec(snic->vdev, \
				     offsetof(struct vnic_snic_config, m), \
				     sizeof(c->m), \
				     &c->m); \
		if (ret) { \
			SNIC_HOST_ERR(snic->shost, \
				      "Error getting %s, %d\n", #m, ret); \
			return ret; \
		} \
	} while (0)

	GET_CONFIG(wq_enet_desc_count);
	GET_CONFIG(maxdatafieldsize);
	GET_CONFIG(intr_timer);
	GET_CONFIG(intr_timer_type);
	GET_CONFIG(flags);
	GET_CONFIG(io_throttle_count);
	GET_CONFIG(port_down_timeout);
	GET_CONFIG(port_down_io_retries);
	GET_CONFIG(luns_per_tgt);
	GET_CONFIG(xpt_type);
	GET_CONFIG(hid);

	c->wq_enet_desc_count =
		min_t(u32,
		      VNIC_SNIC_WQ_DESCS_MAX,
		      max_t(u32,
			    VNIC_SNIC_WQ_DESCS_MIN,
			    c->wq_enet_desc_count));

	c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);

	c->maxdatafieldsize =
		min_t(u32,
		      VNIC_SNIC_MAXDATAFIELDSIZE_MAX,
		      max_t(u32,
			    VNIC_SNIC_MAXDATAFIELDSIZE_MIN,
			    c->maxdatafieldsize));

	c->io_throttle_count =
		min_t(u32,
		      VNIC_SNIC_IO_THROTTLE_COUNT_MAX,
		      max_t(u32,
			    VNIC_SNIC_IO_THROTTLE_COUNT_MIN,
			    c->io_throttle_count));

	c->port_down_timeout =
		min_t(u32,
		      VNIC_SNIC_PORT_DOWN_TIMEOUT_MAX,
		      c->port_down_timeout);

	c->port_down_io_retries =
		min_t(u32,
		      VNIC_SNIC_PORT_DOWN_IO_RETRIES_MAX,
		      c->port_down_io_retries);

	c->luns_per_tgt =
		min_t(u32,
		      VNIC_SNIC_LUNS_PER_TARGET_MAX,
		      max_t(u32,
			    VNIC_SNIC_LUNS_PER_TARGET_MIN,
			    c->luns_per_tgt));

	c->intr_timer = min_t(u32, VNIC_INTR_TIMER_MAX, c->intr_timer);

	SNIC_INFO("vNIC resources wq %d\n", c->wq_enet_desc_count);
	SNIC_INFO("vNIC mtu %d intr timer %d\n",
		  c->maxdatafieldsize,
		  c->intr_timer);
	SNIC_INFO("vNIC flags 0x%x luns per tgt %d\n",
		  c->flags,
		  c->luns_per_tgt);
	SNIC_INFO("vNIC io throttle count %d\n", c->io_throttle_count);
	SNIC_INFO("vNIC port down timeout %d port down io retries %d\n",
		  c->port_down_timeout,
		  c->port_down_io_retries);
	SNIC_INFO("vNIC back end type = %d\n", c->xpt_type);
	SNIC_INFO("vNIC hid = %d\n", c->hid);

	return 0;
}
/*
 * snic_get_res_counts - query the device for the number of WQ, CQ and
 * interrupt-control resources; all three must be non-zero.
 */
void
snic_get_res_counts(struct snic *snic)
{
	snic->wq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_WQ);
	SNIC_BUG_ON(snic->wq_count == 0);
	snic->cq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_CQ);
	SNIC_BUG_ON(snic->cq_count == 0);
	snic->intr_count = svnic_dev_get_res_count(snic->vdev,
						   RES_TYPE_INTR_CTRL);
	SNIC_BUG_ON(snic->intr_count == 0);
}
/*
 * snic_free_vnic_res - free every allocated WQ, CQ and interrupt
 * resource (teardown counterpart / error path of snic_alloc_vnic_res).
 */
void
snic_free_vnic_res(struct snic *snic)
{
	unsigned int i;

	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_free(&snic->wq[i]);

	for (i = 0; i < snic->cq_count; i++)
		svnic_cq_free(&snic->cq[i]);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_free(&snic->intr[i]);
}
/*
 * snic_alloc_vnic_res - allocate and initialize all vNIC queue and
 * interrupt resources.
 *
 * Layout: one WQ per SCSI IO queue; CQ[0..wq_count-1] pair with the WQs
 * (enet descriptors), CQ[wq_count..cq_count-1] carry fw->host messages
 * (snic_fw_req entries, sized 3x the WQ depth); cq_count must equal
 * 2 * wq_count. All queues and interrupts are then initialized (MSI-X
 * only) and the stats area is primed with a first dump. On any failure
 * everything allocated so far is freed and the error is returned.
 */
int
snic_alloc_vnic_res(struct snic *snic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int mask_on_assertion;
	unsigned int intr_offset;
	unsigned int err_intr_enable;
	unsigned int err_intr_offset;
	unsigned int i;
	int ret;

	intr_mode = svnic_dev_get_intr_mode(snic->vdev);

	SNIC_INFO("vNIC interrupt mode: %s\n",
		  ((intr_mode == VNIC_DEV_INTR_MODE_INTX) ?
		   "Legacy PCI INTx" :
		   ((intr_mode == VNIC_DEV_INTR_MODE_MSI) ?
		    "MSI" :
		    ((intr_mode == VNIC_DEV_INTR_MODE_MSIX) ?
		     "MSI-X" : "Unknown"))));

	/* only MSI-X is supported */
	SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);

	SNIC_INFO("wq %d cq %d intr %d\n", snic->wq_count,
		  snic->cq_count,
		  snic->intr_count);


	/* Allocate WQs used for SCSI IOs */
	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_alloc(snic->vdev,
				     &snic->wq[i],
				     i,
				     snic->config.wq_enet_desc_count,
				     sizeof(struct wq_enet_desc));
		if (ret)
			goto error_cleanup;
	}

	/* CQ for each WQ */
	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_cq_alloc(snic->vdev,
				     &snic->cq[i],
				     i,
				     snic->config.wq_enet_desc_count,
				     sizeof(struct cq_enet_wq_desc));
		if (ret)
			goto error_cleanup;
	}

	SNIC_BUG_ON(snic->cq_count != 2 * snic->wq_count);
	/* CQ for FW TO host */
	for (i = snic->wq_count; i < snic->cq_count; i++) {
		ret = svnic_cq_alloc(snic->vdev,
				     &snic->cq[i],
				     i,
				     (snic->config.wq_enet_desc_count * 3),
				     sizeof(struct snic_fw_req));
		if (ret)
			goto error_cleanup;
	}

	for (i = 0; i < snic->intr_count; i++) {
		ret = svnic_intr_alloc(snic->vdev, &snic->intr[i], i);
		if (ret)
			goto error_cleanup;
	}

	/*
	 * Init WQ Resources.
	 * WQ[0 to n] points to CQ[0 to n-1]
	 * firmware to host comm points to CQ[n to m+1]
	 */
	err_intr_enable = 1;
	err_intr_offset = snic->err_intr_offset;

	for (i = 0; i < snic->wq_count; i++) {
		svnic_wq_init(&snic->wq[i],
			      i,
			      err_intr_enable,
			      err_intr_offset);
	}

	for (i = 0; i < snic->cq_count; i++) {
		intr_offset = i;

		svnic_cq_init(&snic->cq[i],
			      0 /* flow_control_enable */,
			      1 /* color_enable */,
			      0 /* cq_head */,
			      0 /* cq_tail */,
			      1 /* cq_tail_color */,
			      1 /* interrupt_enable */,
			      1 /* cq_entry_enable */,
			      0 /* cq_message_enable */,
			      intr_offset,
			      0 /* cq_message_addr */);
	}

	/*
	 * Init INTR resources
	 * Assumption : snic is always in MSI-X mode
	 */
	SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX);
	mask_on_assertion = 1;

	for (i = 0; i < snic->intr_count; i++) {
		svnic_intr_init(&snic->intr[i],
				snic->config.intr_timer,
				snic->config.intr_timer_type,
				mask_on_assertion);
	}

	/* init the stats memory by making the first call here */
	ret = svnic_dev_stats_dump(snic->vdev, &snic->stats);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "svnic_dev_stats_dump failed - x%x\n",
			      ret);
		goto error_cleanup;
	}

	/* Clear LIF stats */
	svnic_dev_stats_clear(snic->vdev);
	ret = 0;

	return ret;

error_cleanup:
	snic_free_vnic_res(snic);

	return ret;
}
/*
 * snic_log_q_error - read and log the hardware error status of every
 * work queue. (Only WQ error status is checked here.)
 */
void
snic_log_q_error(struct snic *snic)
{
	unsigned int i;
	u32 err_status;

	for (i = 0; i < snic->wq_count; i++) {
		err_status = ioread32(&snic->wq[i].ctrl->error_status);
		if (err_status)
			SNIC_HOST_ERR(snic->shost,
				      "WQ[%d] error status %d\n",
				      i,
				      err_status);
	}
} /* end of snic_log_q_error */
/*
* Copyright 2014 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __SNIC_RES_H
#define __SNIC_RES_H
#include "snic_io.h"
#include "wq_enet_desc.h"
#include "vnic_wq.h"
#include "snic_fwint.h"
#include "vnic_cq_fw.h"
/*
 * snic_icmnd_init : populate a host request as an ICMND (SCSI I/O command).
 *
 * Encodes the common request header, then fills in the I/O-command payload:
 * target/LUN addressing, the CDB (zero-padded to SNIC_CDB_LEN), the data
 * SGL address/length, and the sense-buffer address/length. All multi-byte
 * fields are converted to little-endian wire format.
 */
static inline void
snic_icmnd_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, u64 ctx,
		u16 flags, u64 tgt_id, u8 *lun, u8 *scsi_cdb, u8 cdb_len,
		u32 data_len, u16 sg_cnt, ulong sgl_addr,
		dma_addr_t sns_addr_pa, u32 sense_len)
{
	__typeof__(req->u.icmnd) *icmnd = &req->u.icmnd;

	snic_io_hdr_enc(&req->hdr, SNIC_REQ_ICMND, 0, cmnd_id, host_id, sg_cnt,
			ctx);

	/* Addressing and per-command flags */
	icmnd->flags = cpu_to_le16(flags);
	icmnd->tgt_id = cpu_to_le64(tgt_id);
	memcpy(&icmnd->lun_id, lun, LUN_ADDR_LEN);

	/* Clear the whole CDB area first so short CDBs are zero-padded */
	icmnd->cdb_len = cdb_len;
	memset(icmnd->cdb, 0, SNIC_CDB_LEN);
	memcpy(icmnd->cdb, scsi_cdb, cdb_len);

	/* Data SGL and sense buffer locations for the firmware */
	icmnd->data_len = cpu_to_le32(data_len);
	icmnd->sg_addr = cpu_to_le64(sgl_addr);
	icmnd->sense_len = cpu_to_le32(sense_len);
	icmnd->sense_addr = cpu_to_le64(sns_addr_pa);
}
/*
 * snic_itmf_init : populate a host request as an ITMF (task management)
 * request — abort task, LUN reset, etc., selected by tm_type.
 *
 * Encodes the common header (no SG entries for TMFs) and the task
 * management payload in little-endian wire format.
 */
static inline void
snic_itmf_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, ulong ctx,
	       u16 flags, u32 req_id, u64 tgt_id, u8 *lun, u8 tm_type)
{
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_ITMF, 0, cmnd_id, host_id, 0, ctx);

	memcpy(&req->u.itmf.lun_id, lun, LUN_ADDR_LEN);
	req->u.itmf.tgt_id = cpu_to_le64(tgt_id);
	/* req_id identifies the victim request; valid only for abort/clear task */
	req->u.itmf.req_id = cpu_to_le32(req_id);
	req->u.itmf.flags = cpu_to_le16(flags);
	req->u.itmf.tm_type = tm_type;
}
/*
 * snic_queue_wq_eth_desc : encode and post one ethernet-style WQ descriptor.
 *
 * Fetches the next free descriptor slot from the work queue, encodes the
 * buffer's DMA address/length plus VLAN and completion-queue options into
 * it, then posts it so the hardware can consume it. Each buffer is sent as
 * a single-descriptor frame (eop = 1).
 */
static inline void
snic_queue_wq_eth_desc(struct vnic_wq *wq,
		       void *os_buf,
		       dma_addr_t dma_addr,
		       unsigned int len,
		       int vlan_tag_insert,
		       unsigned int vlan_tag,
		       int cq_entry)
{
	struct wq_enet_desc *desc = svnic_wq_next_desc(wq);

	wq_enet_desc_enc(desc,
			 (u64)dma_addr | VNIC_PADDR_TARGET,
			 (u16)len,
			 0, /* mss_or_csum_offset */
			 0, /* fc_eof */
			 0, /* offload mode */
			 1, /* eop */
			 (u8)cq_entry,
			 0, /* fcoe_encap */
			 (u8)vlan_tag_insert,
			 (u16)vlan_tag,
			 0 /* loopback */);

	/*
	 * NOTE(review): the trailing 1, 1 presumably mark this buffer as both
	 * start- and end-of-packet for svnic_wq_post — confirm against the
	 * svnic_wq_post() prototype (not visible in this file).
	 */
	svnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
}
struct snic;
int snic_get_vnic_config(struct snic *);
int snic_alloc_vnic_res(struct snic *);
void snic_free_vnic_res(struct snic *);
void snic_get_res_counts(struct snic *);
void snic_log_q_error(struct snic *);
int snic_get_vnic_resources_size(struct snic *);
#endif /* __SNIC_RES_H */
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册