Commit a3667aae, authored by Naresh Kumar Inna, committed by James Bottomley

[SCSI] csiostor: Chelsio FCoE offload driver

Signed-off-by: Naresh Kumar Inna <naresh@chelsio.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Parent ce91a923
@@ -1812,6 +1812,7 @@ config SCSI_VIRTIO
This is the virtual HBA driver for virtio. If the kernel will
be used in a virtual machine, say Y or M.
source "drivers/scsi/csiostor/Kconfig"
endif # SCSI_LOWLEVEL
@@ -90,6 +90,7 @@ obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
obj-$(CONFIG_SCSI_T128) += t128.o
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
config SCSI_CHELSIO_FCOE
tristate "Chelsio Communications FCoE support"
depends on PCI && SCSI
select SCSI_FC_ATTRS
select FW_LOADER
help
This driver supports FCoE Offload functionality over
Chelsio T4-based 10Gb Converged Network Adapters.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
For customer support, please visit our customer support page at
<http://www.chelsio.com/support.html>.
Please send feedback to <linux-bugs@chelsio.com>.
To compile this driver as a module choose M here; the module
will be called csiostor.
#
## Chelsio FCoE driver
#
##
ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o
csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \
csio_hw.o csio_isr.o csio_mb.o csio_rnode.o csio_wr.o
This diff has been collapsed.
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_DEFS_H__
#define __CSIO_DEFS_H__
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#define CSIO_INVALID_IDX 0xFFFFFFFF
#define CSIO_INC_STATS(elem, val) ((elem)->stats.val++)
#define CSIO_DEC_STATS(elem, val) ((elem)->stats.val--)
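/* A WWN is treated as valid when its NAA nibble (top 4 bits of byte 0)
 * is 5, i.e. an IEEE Registered name, as checked below.
 */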
#define CSIO_VALID_WWN(__n) ((*__n >> 4) == 0x5 ? true : false)
#define CSIO_DID_MASK 0xFFFFFF
#define CSIO_WORD_TO_BYTE 4
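/*
 * Fallback 64-bit MMIO accessors for platforms without native
 * readq/writeq: each is built from two 32-bit accesses (low word
 * first), so the 64-bit access is not atomic.
 */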
#ifndef readq
static inline u64 readq(void __iomem *addr)
{
return readl(addr) + ((u64)readl(addr + 4) << 32);
}
static inline void writeq(u64 val, void __iomem *addr)
{
writel(val, addr);
writel(val >> 32, addr + 4);
}
#endif
static inline int
csio_list_deleted(struct list_head *list)
{
return ((list->next == list) && (list->prev == list));
}
#define csio_list_next(elem) (((struct list_head *)(elem))->next)
#define csio_list_prev(elem) (((struct list_head *)(elem))->prev)
/* State machine */
typedef void (*csio_sm_state_t)(void *, uint32_t);
struct csio_sm {
struct list_head sm_list;
csio_sm_state_t sm_state;
};
static inline void
csio_set_state(void *smp, void *state)
{
((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
}
static inline void
csio_init_state(struct csio_sm *smp, void *state)
{
csio_set_state(smp, state);
}
static inline void
csio_post_event(void *smp, uint32_t evt)
{
((struct csio_sm *)smp)->sm_state(smp, evt);
}
static inline csio_sm_state_t
csio_get_state(void *smp)
{
return ((struct csio_sm *)smp)->sm_state;
}
static inline bool
csio_match_state(void *smp, void *state)
{
return (csio_get_state(smp) == (csio_sm_state_t)state);
}
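A minimal sketch of how these helpers drive a state machine (the object and state-function names below are illustrative examples, not taken from the driver):
static void csio_example_ready(void *obj, uint32_t evt)
{
	/* handle events that are valid in the "ready" state */
}
static void csio_example_init(void *obj, uint32_t evt)
{
	if (evt == 1)			/* hypothetical "start" event */
		csio_set_state(obj, csio_example_ready);
}
static void csio_example_run(struct csio_sm *sm)
{
	csio_init_state(sm, csio_example_init);	/* set the initial state */
	csio_post_event(sm, 1);			/* dispatched to csio_example_init() */
	/* csio_match_state(sm, csio_example_ready) now returns true */
}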
#define CSIO_ASSERT(cond) BUG_ON(!(cond))
#ifdef __CSIO_DEBUG__
#define CSIO_DB_ASSERT(__c) CSIO_ASSERT((__c))
#else
#define CSIO_DB_ASSERT(__c)
#endif
#endif /* ifndef __CSIO_DEFS_H__ */
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_INIT_H__
#define __CSIO_INIT_H__
#include <linux/pci.h>
#include <linux/if_ether.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "csio_scsi.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_hw.h"
#define CSIO_DRV_AUTHOR "Chelsio Communications"
#define CSIO_DRV_LICENSE "Dual BSD/GPL"
#define CSIO_DRV_DESC "Chelsio FCoE driver"
#define CSIO_DRV_VERSION "1.0.0"
#define CSIO_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
#define CSIO_IS_T4_FPGA(_dev) (((_dev) == CSIO_DEVID_PE10K) ||\
((_dev) == CSIO_DEVID_PE10K_PF1))
/* FCoE device IDs */
#define CSIO_DEVID_PE10K 0xA000
#define CSIO_DEVID_PE10K_PF1 0xA001
#define CSIO_DEVID_T440DBG_FCOE 0x4600
#define CSIO_DEVID_T420CR_FCOE 0x4601
#define CSIO_DEVID_T422CR_FCOE 0x4602
#define CSIO_DEVID_T440CR_FCOE 0x4603
#define CSIO_DEVID_T420BCH_FCOE 0x4604
#define CSIO_DEVID_T440BCH_FCOE 0x4605
#define CSIO_DEVID_T440CH_FCOE 0x4606
#define CSIO_DEVID_T420SO_FCOE 0x4607
#define CSIO_DEVID_T420CX_FCOE 0x4608
#define CSIO_DEVID_T420BT_FCOE 0x4609
#define CSIO_DEVID_T404BT_FCOE 0x460A
#define CSIO_DEVID_B420_FCOE 0x460B
#define CSIO_DEVID_B404_FCOE 0x460C
#define CSIO_DEVID_T480CR_FCOE 0x460D
#define CSIO_DEVID_T440LPCR_FCOE 0x460E
extern struct fc_function_template csio_fc_transport_funcs;
extern struct fc_function_template csio_fc_transport_vport_funcs;
void csio_fchost_attr_init(struct csio_lnode *);
/* INTx handlers */
void csio_scsi_intx_handler(struct csio_hw *, void *, uint32_t,
struct csio_fl_dma_buf *, void *);
void csio_fwevt_intx_handler(struct csio_hw *, void *, uint32_t,
struct csio_fl_dma_buf *, void *);
/* Common os lnode APIs */
void csio_lnodes_block_request(struct csio_hw *);
void csio_lnodes_unblock_request(struct csio_hw *);
void csio_lnodes_block_by_port(struct csio_hw *, uint8_t);
void csio_lnodes_unblock_by_port(struct csio_hw *, uint8_t);
struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool,
struct csio_lnode *);
void csio_shost_exit(struct csio_lnode *);
void csio_lnodes_exit(struct csio_hw *, bool);
static inline struct Scsi_Host *
csio_ln_to_shost(struct csio_lnode *ln)
{
return container_of((void *)ln, struct Scsi_Host, hostdata[0]);
}
/* SCSI -- locking version of get/put ioreqs */
static inline struct csio_ioreq *
csio_get_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim)
{
struct csio_ioreq *ioreq;
unsigned long flags;
spin_lock_irqsave(&scsim->freelist_lock, flags);
ioreq = csio_get_scsi_ioreq(scsim);
spin_unlock_irqrestore(&scsim->freelist_lock, flags);
return ioreq;
}
static inline void
csio_put_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim,
struct csio_ioreq *ioreq)
{
unsigned long flags;
spin_lock_irqsave(&scsim->freelist_lock, flags);
csio_put_scsi_ioreq(scsim, ioreq);
spin_unlock_irqrestore(&scsim->freelist_lock, flags);
}
/* Called in interrupt context */
static inline void
csio_put_scsi_ioreq_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
struct list_head *reqlist, int n)
{
unsigned long flags;
spin_lock_irqsave(&scsim->freelist_lock, flags);
csio_put_scsi_ioreq_list(scsim, reqlist, n);
spin_unlock_irqrestore(&scsim->freelist_lock, flags);
}
/* Called in interrupt context */
static inline void
csio_put_scsi_ddp_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
struct list_head *reqlist, int n)
{
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
csio_put_scsi_ddp_list(scsim, reqlist, n);
spin_unlock_irqrestore(&hw->lock, flags);
}
#endif /* ifndef __CSIO_INIT_H__ */
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include "csio_init.h"
#include "csio_hw.h"
static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
struct csio_hw *hw = (struct csio_hw *) dev_id;
int rv;
unsigned long flags;
if (unlikely(!hw))
return IRQ_NONE;
if (unlikely(pci_channel_offline(hw->pdev))) {
CSIO_INC_STATS(hw, n_pcich_offline);
return IRQ_NONE;
}
spin_lock_irqsave(&hw->lock, flags);
csio_hw_slow_intr_handler(hw);
rv = csio_mb_isr_handler(hw);
if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
hw->flags |= CSIO_HWF_FWEVT_PENDING;
spin_unlock_irqrestore(&hw->lock, flags);
schedule_work(&hw->evtq_work);
return IRQ_HANDLED;
}
spin_unlock_irqrestore(&hw->lock, flags);
return IRQ_HANDLED;
}
/*
* csio_fwevt_handler - Common FW event handler routine.
* @hw: HW module.
*
* This is the common handler for FW events. It is shared between
* the MSIX and INTx paths.
*/
static void
csio_fwevt_handler(struct csio_hw *hw)
{
int rv;
unsigned long flags;
rv = csio_fwevtq_handler(hw);
spin_lock_irqsave(&hw->lock, flags);
if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
hw->flags |= CSIO_HWF_FWEVT_PENDING;
spin_unlock_irqrestore(&hw->lock, flags);
schedule_work(&hw->evtq_work);
return;
}
spin_unlock_irqrestore(&hw->lock, flags);
} /* csio_fwevt_handler */
/*
* csio_fwevt_isr() - FW events MSIX ISR
* @irq:
* @dev_id:
*
* Process WRs on the FW event queue.
*
*/
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
struct csio_hw *hw = (struct csio_hw *) dev_id;
if (unlikely(!hw))
return IRQ_NONE;
if (unlikely(pci_channel_offline(hw->pdev))) {
CSIO_INC_STATS(hw, n_pcich_offline);
return IRQ_NONE;
}
csio_fwevt_handler(hw);
return IRQ_HANDLED;
}
/*
* csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
* @hw: HW module.
* @wr: The FW event WR.
* @len: Length of the WR.
* @flb: Freelist buffer array.
* @priv: Private data.
*/
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
struct csio_fl_dma_buf *flb, void *priv)
{
csio_fwevt_handler(hw);
} /* csio_fwevt_intx_handler */
/*
* csio_process_scsi_cmpl - Process a SCSI WR completion.
* @hw: HW module.
* @wr: The completed WR from the ingress queue.
* @len: Length of the WR.
* @flb: Freelist buffer array.
*
*/
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
struct csio_fl_dma_buf *flb, void *cbfn_q)
{
struct csio_ioreq *ioreq;
uint8_t *scsiwr;
uint8_t subop;
void *cmnd;
unsigned long flags;
ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
if (likely(ioreq)) {
if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
((struct fw_scsi_abrt_cls_wr *)
scsiwr)->sub_opcode_to_chk_all_io);
csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
subop ? "Close" : "Abort",
ioreq, ioreq->wr_status);
spin_lock_irqsave(&hw->lock, flags);
if (subop)
csio_scsi_closed(ioreq,
(struct list_head *)cbfn_q);
else
csio_scsi_aborted(ioreq,
(struct list_head *)cbfn_q);
/*
* We call scsi_done for I/Os whose aborts the driver
* thinks have timed out. If there is a race caused by FW
* completing the abort at the exact same time that the
* driver has detected the abort timeout, the following
* check prevents calling of scsi_done twice for the
* same command: once from the eh_abort_handler, another
* from csio_scsi_isr_handler(). This also avoids the
* need to check if csio_scsi_cmnd(req) is NULL in the
* fast path.
*/
cmnd = csio_scsi_cmnd(ioreq);
if (unlikely(cmnd == NULL))
list_del_init(&ioreq->sm.sm_list);
spin_unlock_irqrestore(&hw->lock, flags);
if (unlikely(cmnd == NULL))
csio_put_scsi_ioreq_lock(hw,
csio_hw_to_scsim(hw), ioreq);
} else {
spin_lock_irqsave(&hw->lock, flags);
csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
spin_unlock_irqrestore(&hw->lock, flags);
}
}
}
/*
* csio_scsi_isr_handler() - Common SCSI ISR handler.
* @iq: Ingress queue pointer.
*
* Processes SCSI completions on the SCSI IQ indicated by scm->iq_idx
* by calling csio_wr_process_iq_idx. If there are completions on the
* isr_cbfn_q, yank them out into a local queue and call their io_cbfns.
* Once done, add these completions onto the freelist.
* This routine is shared b/w MSIX and INTx.
*/
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
struct csio_hw *hw = (struct csio_hw *)iq->owner;
LIST_HEAD(cbfn_q);
struct list_head *tmp;
struct csio_scsim *scm;
struct csio_ioreq *ioreq;
int isr_completions = 0;
scm = csio_hw_to_scsim(hw);
if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
&cbfn_q) != 0))
return IRQ_NONE;
/* Call back the completion routines */
list_for_each(tmp, &cbfn_q) {
ioreq = (struct csio_ioreq *)tmp;
isr_completions++;
ioreq->io_cbfn(hw, ioreq);
/* Release ddp buffer if used for this req */
if (unlikely(ioreq->dcopy))
csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
ioreq->nsge);
}
if (isr_completions) {
/* Return the ioreqs back to ioreq->freelist */
csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
isr_completions);
}
return IRQ_HANDLED;
}
/*
* csio_scsi_isr() - SCSI MSIX handler
* @irq:
* @dev_id:
*
* This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
* for handling SCSI completions.
*/
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
struct csio_q *iq = (struct csio_q *) dev_id;
struct csio_hw *hw;
if (unlikely(!iq))
return IRQ_NONE;
hw = (struct csio_hw *)iq->owner;
if (unlikely(pci_channel_offline(hw->pdev))) {
CSIO_INC_STATS(hw, n_pcich_offline);
return IRQ_NONE;
}
csio_scsi_isr_handler(iq);
return IRQ_HANDLED;
}
/*
* csio_scsi_intx_handler() - SCSI INTx handler
* @hw: HW module.
* @wr: The WR.
* @len: Length of the WR.
* @flb: Freelist buffer array.
* @priv: SCSI ingress queue pointer.
*
* This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
* for handling SCSI completions.
*/
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
struct csio_fl_dma_buf *flb, void *priv)
{
struct csio_q *iq = priv;
csio_scsi_isr_handler(iq);
} /* csio_scsi_intx_handler */
/*
* csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
* @irq:
* @dev_id:
*
*
*/
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
struct csio_hw *hw = (struct csio_hw *) dev_id;
struct csio_q *intx_q = NULL;
int rv;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
if (unlikely(!hw))
return IRQ_NONE;
if (unlikely(pci_channel_offline(hw->pdev))) {
CSIO_INC_STATS(hw, n_pcich_offline);
return IRQ_NONE;
}
/* Disable the interrupt for this PCI function. */
if (hw->intr_mode == CSIO_IM_INTX)
csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI));
/*
* The read in the following function will flush the
* above write.
*/
if (csio_hw_slow_intr_handler(hw))
ret = IRQ_HANDLED;
/* Get the INTx Forward interrupt IQ. */
intx_q = csio_get_q(hw, hw->intr_iq_idx);
CSIO_DB_ASSERT(intx_q);
/* IQ handler is not possible for intx_q, hence pass in NULL */
if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
ret = IRQ_HANDLED;
spin_lock_irqsave(&hw->lock, flags);
rv = csio_mb_isr_handler(hw);
if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
hw->flags |= CSIO_HWF_FWEVT_PENDING;
spin_unlock_irqrestore(&hw->lock, flags);
schedule_work(&hw->evtq_work);
return IRQ_HANDLED;
}
spin_unlock_irqrestore(&hw->lock, flags);
return ret;
}
static void
csio_add_msix_desc(struct csio_hw *hw)
{
int i;
struct csio_msix_entries *entryp = &hw->msix_entries[0];
int k = CSIO_EXTRA_VECS;
int len = sizeof(entryp->desc) - 1;
int cnt = hw->num_sqsets + k;
/* Non-data vector */
memset(entryp->desc, 0, len + 1);
snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
entryp++;
memset(entryp->desc, 0, len + 1);
snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
entryp++;
/* Name SCSI vecs */
for (i = k; i < cnt; i++, entryp++) {
memset(entryp->desc, 0, len + 1);
snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
}
}
int
csio_request_irqs(struct csio_hw *hw)
{
int rv, i, j, k = 0;
struct csio_msix_entries *entryp = &hw->msix_entries[0];
struct csio_scsi_cpu_info *info;
if (hw->intr_mode != CSIO_IM_MSIX) {
rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
(hw->intr_mode == CSIO_IM_MSI) ?
0 : IRQF_SHARED,
KBUILD_MODNAME, hw);
if (rv) {
if (hw->intr_mode == CSIO_IM_MSI)
pci_disable_msi(hw->pdev);
csio_err(hw, "Failed to allocate interrupt line.\n");
return -EINVAL;
}
goto out;
}
/* Add the MSIX vector descriptions */
csio_add_msix_desc(hw);
rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
entryp[k].desc, hw);
if (rv) {
csio_err(hw, "IRQ request failed for vec %d err:%d\n",
entryp[k].vector, rv);
goto err;
}
entryp[k++].dev_id = (void *)hw;
rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
entryp[k].desc, hw);
if (rv) {
csio_err(hw, "IRQ request failed for vec %d err:%d\n",
entryp[k].vector, rv);
goto err;
}
entryp[k++].dev_id = (void *)hw;
/* Allocate IRQs for SCSI */
for (i = 0; i < hw->num_pports; i++) {
info = &hw->scsi_cpu_info[i];
for (j = 0; j < info->max_cpus; j++, k++) {
struct csio_scsi_qset *sqset = &hw->sqset[i][j];
struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
entryp[k].desc, q);
if (rv) {
csio_err(hw,
"IRQ request failed for vec %d err:%d\n",
entryp[k].vector, rv);
goto err;
}
entryp[k].dev_id = (void *)q;
} /* for all scsi cpus */
} /* for all ports */
out:
hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
return 0;
err:
for (i = 0; i < k; i++) {
entryp = &hw->msix_entries[i];
free_irq(entryp->vector, entryp->dev_id);
}
pci_disable_msix(hw->pdev);
return -EINVAL;
}
static void
csio_disable_msix(struct csio_hw *hw, bool free)
{
int i;
struct csio_msix_entries *entryp;
int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;
if (free) {
for (i = 0; i < cnt; i++) {
entryp = &hw->msix_entries[i];
free_irq(entryp->vector, entryp->dev_id);
}
}
pci_disable_msix(hw->pdev);
}
/* Reduce per-port max possible CPUs */
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
int i;
struct csio_scsi_cpu_info *info;
while (cnt < hw->num_sqsets) {
for (i = 0; i < hw->num_pports; i++) {
info = &hw->scsi_cpu_info[i];
if (info->max_cpus > 1) {
info->max_cpus--;
hw->num_sqsets--;
if (hw->num_sqsets <= cnt)
break;
}
}
}
csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}
static int
csio_enable_msix(struct csio_hw *hw)
{
int rv, i, j, k, n, min, cnt;
struct csio_msix_entries *entryp;
struct msix_entry *entries;
int extra = CSIO_EXTRA_VECS;
struct csio_scsi_cpu_info *info;
min = hw->num_pports + extra;
cnt = hw->num_sqsets + extra;
/* Max vectors required based on #niqs configured in fw */
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
cnt = min_t(uint8_t, hw->cfg_niq, cnt);
entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL);
if (!entries)
return -ENOMEM;
for (i = 0; i < cnt; i++)
entries[i].entry = (uint16_t)i;
csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);
while ((rv = pci_enable_msix(hw->pdev, entries, cnt)) >= min)
cnt = rv;
if (!rv) {
if (cnt < (hw->num_sqsets + extra)) {
csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
csio_reduce_sqsets(hw, cnt - extra);
}
} else {
if (rv > 0) {
pci_disable_msix(hw->pdev);
csio_info(hw, "Not using MSI-X, remainder:%d\n", rv);
}
kfree(entries);
return -ENOMEM;
}
/* Save off vectors */
for (i = 0; i < cnt; i++) {
entryp = &hw->msix_entries[i];
entryp->vector = entries[i].vector;
}
/* Distribute vectors */
k = 0;
csio_set_nondata_intr_idx(hw, entries[k].entry);
csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
csio_set_fwevt_intr_idx(hw, entries[k++].entry);
for (i = 0; i < hw->num_pports; i++) {
info = &hw->scsi_cpu_info[i];
for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
n = (j % info->max_cpus) + k;
hw->sqset[i][j].intr_idx = entries[n].entry;
}
k += info->max_cpus;
}
kfree(entries);
return 0;
}
void
csio_intr_enable(struct csio_hw *hw)
{
hw->intr_mode = CSIO_IM_NONE;
hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
/* Try MSIX, then MSI or fall back to INTx */
if ((csio_msi == 2) && !csio_enable_msix(hw))
hw->intr_mode = CSIO_IM_MSIX;
else {
/* Max iqs required based on #niqs configured in fw */
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
!csio_is_hw_master(hw)) {
int extra = CSIO_EXTRA_MSI_IQS;
if (hw->cfg_niq < (hw->num_sqsets + extra)) {
csio_dbg(hw, "Reducing sqsets to %d\n",
hw->cfg_niq - extra);
csio_reduce_sqsets(hw, hw->cfg_niq - extra);
}
}
if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
hw->intr_mode = CSIO_IM_MSI;
else
hw->intr_mode = CSIO_IM_INTX;
}
csio_dbg(hw, "Using %s interrupt mode.\n",
(hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}
void
csio_intr_disable(struct csio_hw *hw, bool free)
{
csio_hw_intr_disable(hw);
switch (hw->intr_mode) {
case CSIO_IM_MSIX:
csio_disable_msix(hw, free);
break;
case CSIO_IM_MSI:
if (free)
free_irq(hw->pdev->irq, hw);
pci_disable_msi(hw->pdev);
break;
case CSIO_IM_INTX:
if (free)
free_irq(hw->pdev->irq, hw);
break;
default:
break;
}
hw->intr_mode = CSIO_IM_NONE;
hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}
This diff has been collapsed.
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_LNODE_H__
#define __CSIO_LNODE_H__
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_els.h>
#include "csio_defs.h"
#include "csio_hw.h"
#define CSIO_FCOE_MAX_NPIV 128
#define CSIO_FCOE_MAX_RNODES 2048
/* FDMI port attribute unknown speed */
#define CSIO_HBA_PORTSPEED_UNKNOWN 0x8000
extern int csio_fcoe_rnodes;
extern int csio_fdmi_enable;
/* State machine events */
enum csio_ln_ev {
CSIO_LNE_NONE = (uint32_t)0,
CSIO_LNE_LINKUP,
CSIO_LNE_FAB_INIT_DONE,
CSIO_LNE_LINK_DOWN,
CSIO_LNE_DOWN_LINK,
CSIO_LNE_LOGO,
CSIO_LNE_CLOSE,
CSIO_LNE_MAX_EVENT,
};
struct csio_fcf_info {
struct list_head list;
uint8_t priority;
uint8_t mac[6];
uint8_t name_id[8];
uint8_t fabric[8];
uint16_t vf_id;
uint8_t vlan_id;
uint16_t max_fcoe_size;
uint8_t fc_map[3];
uint32_t fka_adv;
uint32_t fcfi;
uint8_t get_next:1;
uint8_t link_aff:1;
uint8_t fpma:1;
uint8_t spma:1;
uint8_t login:1;
uint8_t portid;
uint8_t spma_mac[6];
struct kref kref;
};
/* Defines for flags */
#define CSIO_LNF_FIPSUPP 0x00000001 /* Fip Supported */
#define CSIO_LNF_NPIVSUPP 0x00000002 /* NPIV supported */
#define CSIO_LNF_LINK_ENABLE 0x00000004 /* Link enabled */
#define CSIO_LNF_FDMI_ENABLE 0x00000008 /* FDMI support */
/* Transport events */
enum csio_ln_fc_evt {
CSIO_LN_FC_LINKUP = 1,
CSIO_LN_FC_LINKDOWN,
CSIO_LN_FC_RSCN,
CSIO_LN_FC_ATTRIB_UPDATE,
};
/* Lnode stats */
struct csio_lnode_stats {
uint32_t n_link_up; /* Link up */
uint32_t n_link_down; /* Link down */
uint32_t n_err; /* error */
uint32_t n_err_nomem; /* memory not available */
uint32_t n_inval_parm; /* Invalid parameters */
uint32_t n_evt_unexp; /* unexpected event */
uint32_t n_evt_drop; /* dropped event */
uint32_t n_rnode_match; /* matched rnode */
uint32_t n_dev_loss_tmo; /* Device loss timeout */
uint32_t n_fdmi_err; /* fdmi err */
uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */
uint32_t n_rnode_alloc; /* rnode allocated */
uint32_t n_rnode_free; /* rnode freed */
uint32_t n_rnode_nomem; /* rnode alloc failure */
uint32_t n_input_requests; /* Input Requests */
uint32_t n_output_requests; /* Output Requests */
uint32_t n_control_requests; /* Control Requests */
uint32_t n_input_bytes; /* Input Bytes */
uint32_t n_output_bytes; /* Output Bytes */
uint32_t rsvd1;
};
/* Common Lnode params */
struct csio_lnode_params {
uint32_t ra_tov;
uint32_t fcfi;
uint32_t log_level; /* Module level for debugging */
};
struct csio_service_parms {
struct fc_els_csp csp; /* Common service parms */
uint8_t wwpn[8]; /* WWPN */
uint8_t wwnn[8]; /* WWNN */
struct fc_els_cssp clsp[4]; /* Class service params */
uint8_t vvl[16]; /* Vendor version level */
};
/* Lnode */
struct csio_lnode {
struct csio_sm sm; /* State machine + sibling
* lnode list.
*/
struct csio_hw *hwp; /* Pointer to the HW module */
uint8_t portid; /* Port ID */
uint8_t rsvd1;
uint16_t rsvd2;
uint32_t dev_num; /* Device number */
uint32_t flags; /* Flags */
struct list_head fcf_lsthead; /* FCF entries */
struct csio_fcf_info *fcfinfo; /* FCF in use */
struct csio_ioreq *mgmt_req; /* MGMT request */
/* FCoE identifiers */
uint8_t mac[6];
uint32_t nport_id;
struct csio_service_parms ln_sparm; /* Service parms */
/* Firmware identifiers */
uint32_t fcf_flowid; /*fcf flowid */
uint32_t vnp_flowid;
uint16_t ssn_cnt; /* Registered Session */
uint8_t cur_evt; /* Current event */
uint8_t prev_evt; /* Previous event */
/* Children */
struct list_head cln_head; /* Head of the children lnode
* list.
*/
uint32_t num_vports; /* Total NPIV/children LNodes*/
struct csio_lnode *pln; /* Parent lnode of child
* lnodes.
*/
struct list_head cmpl_q; /* Pending I/Os on this lnode */
/* Remote node information */
struct list_head rnhead; /* Head of rnode list */
uint32_t num_reg_rnodes; /* Number of rnodes registered
* with the host.
*/
uint32_t n_scsi_tgts; /* Number of scsi targets
* found
*/
uint32_t last_scan_ntgts;/* Number of scsi targets
* found per last scan.
*/
uint32_t tgt_scan_tick; /* timer started after
* new tgt found
*/
/* FC transport data */
struct fc_vport *fc_vport;
struct fc_host_statistics fch_stats;
struct csio_lnode_stats stats; /* Common lnode stats */
struct csio_lnode_params params; /* Common lnode params */
};
#define csio_lnode_to_hw(ln) ((ln)->hwp)
#define csio_root_lnode(ln) (csio_lnode_to_hw((ln))->rln)
#define csio_parent_lnode(ln) ((ln)->pln)
#define csio_ln_flowid(ln) ((ln)->vnp_flowid)
#define csio_ln_wwpn(ln) ((ln)->ln_sparm.wwpn)
#define csio_ln_wwnn(ln) ((ln)->ln_sparm.wwnn)
#define csio_is_root_ln(ln) (((ln) == csio_root_lnode((ln))) ? 1 : 0)
#define csio_is_phys_ln(ln) (((ln)->pln == NULL) ? 1 : 0)
#define csio_is_npiv_ln(ln) (((ln)->pln != NULL) ? 1 : 0)
#define csio_ln_dbg(_ln, _fmt, ...) \
csio_dbg(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
#define csio_ln_err(_ln, _fmt, ...) \
csio_err(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
#define csio_ln_warn(_ln, _fmt, ...) \
csio_warn(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
/* HW->Lnode notifications */
enum csio_ln_notify {
CSIO_LN_NOTIFY_HWREADY = 1,
CSIO_LN_NOTIFY_HWSTOP,
CSIO_LN_NOTIFY_HWREMOVE,
CSIO_LN_NOTIFY_HWRESET,
};
void csio_fcoe_fwevt_handler(struct csio_hw *, __u8 cpl_op, __be64 *);
int csio_is_lnode_ready(struct csio_lnode *);
void csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str);
struct csio_lnode *csio_lnode_lookup_by_wwpn(struct csio_hw *, uint8_t *);
int csio_get_phy_port_stats(struct csio_hw *, uint8_t ,
struct fw_fcoe_port_stats *);
int csio_scan_done(struct csio_lnode *, unsigned long, unsigned long,
unsigned long, unsigned long);
void csio_notify_lnodes(struct csio_hw *, enum csio_ln_notify);
void csio_disable_lnodes(struct csio_hw *, uint8_t, bool);
void csio_lnode_async_event(struct csio_lnode *, enum csio_ln_fc_evt);
int csio_ln_fdmi_start(struct csio_lnode *, void *);
int csio_lnode_start(struct csio_lnode *);
void csio_lnode_stop(struct csio_lnode *);
void csio_lnode_close(struct csio_lnode *);
int csio_lnode_init(struct csio_lnode *, struct csio_hw *,
struct csio_lnode *);
void csio_lnode_exit(struct csio_lnode *);
#endif /* ifndef __CSIO_LNODE_H__ */
This diff has been collapsed.
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_MB_H__
#define __CSIO_MB_H__
#include <linux/timer.h>
#include <linux/completion.h>
#include "t4fw_api.h"
#include "t4fw_api_stor.h"
#include "csio_defs.h"
#define CSIO_STATS_OFFSET (2)
#define CSIO_NUM_STATS_PER_MB (6)
struct fw_fcoe_port_cmd_params {
uint8_t portid;
uint8_t idx;
uint8_t nstats;
};
#define CSIO_DUMP_MB(__hw, __num, __mb) \
csio_dbg(__hw, "\t%llx %llx %llx %llx %llx %llx %llx %llx\n", \
(unsigned long long)csio_rd_reg64(__hw, __mb), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 8), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 16), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 24), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 32), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 40), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 48), \
(unsigned long long)csio_rd_reg64(__hw, __mb + 56))
#define CSIO_MB_MAX_REGS 8
#define CSIO_MAX_MB_SIZE 64
#define CSIO_MB_POLL_FREQ 5 /* 5 ms */
#define CSIO_MB_DEFAULT_TMO FW_CMD_MAX_TIMEOUT
/* Device master in HELLO command */
enum csio_dev_master { CSIO_MASTER_CANT, CSIO_MASTER_MAY, CSIO_MASTER_MUST };
enum csio_mb_owner { CSIO_MBOWNER_NONE, CSIO_MBOWNER_FW, CSIO_MBOWNER_PL };
enum csio_dev_state {
CSIO_DEV_STATE_UNINIT,
CSIO_DEV_STATE_INIT,
CSIO_DEV_STATE_ERR
};
#define FW_PARAM_DEV(param) \
(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
FW_PARAMS_PARAM_Y(0) | \
FW_PARAMS_PARAM_Z(0))
enum {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
#define CSIO_INIT_MBP(__mbp, __cp, __tmo, __priv, __fn, __clear) \
do { \
if (__clear) \
memset((__cp), 0, \
CSIO_MB_MAX_REGS * sizeof(__be64)); \
INIT_LIST_HEAD(&(__mbp)->list); \
(__mbp)->tmo = (__tmo); \
(__mbp)->priv = (void *)(__priv); \
(__mbp)->mb_cbfn = (__fn); \
(__mbp)->mb_size = sizeof(*(__cp)); \
} while (0)
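A minimal usage sketch of CSIO_INIT_MBP (illustrative only; the command type, allocation context and the NULL-callback polled issue below are assumptions, not code from the driver sources):
/*
 *	struct csio_mb *mbp = kzalloc(sizeof(*mbp), GFP_KERNEL);
 *	struct fw_ldst_cmd *cmdp = (struct fw_ldst_cmd *)(mbp->mb);
 *
 *	CSIO_INIT_MBP(mbp, cmdp, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
 *	// fill in *cmdp as defined by t4fw_api.h
 *	if (csio_mb_issue(hw, mbp))
 *		csio_err(hw, "mailbox issue failed\n");
 *	kfree(mbp);
 */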
struct csio_mbm_stats {
uint32_t n_req; /* number of mbox req */
uint32_t n_rsp; /* number of mbox rsp */
uint32_t n_activeq; /* number of mbox req active Q */
uint32_t n_cbfnq; /* number of mbox req cbfn Q */
uint32_t n_tmo; /* number of mbox timeout */
uint32_t n_cancel; /* number of mbox cancel */
uint32_t n_err; /* number of mbox error */
};
/* Driver version of Mailbox */
struct csio_mb {
struct list_head list; /* for req/resp */
/* queue in driver */
__be64 mb[CSIO_MB_MAX_REGS]; /* MB in HW format */
int mb_size; /* Size of this
* mailbox.
*/
uint32_t tmo; /* Timeout */
struct completion cmplobj; /* MB Completion
* object
*/
void (*mb_cbfn) (struct csio_hw *, struct csio_mb *);
/* Callback fn */
void *priv; /* Owner private ptr */
};
struct csio_mbm {
uint32_t a_mbox; /* Async mbox num */
uint32_t intr_idx; /* Interrupt index */
struct timer_list timer; /* Mbox timer */
struct list_head req_q; /* Mbox request queue */
struct list_head cbfn_q; /* Mbox completion q */
struct csio_mb *mcurrent; /* Current mailbox */
uint32_t req_q_cnt; /* Outstanding mbox
* cmds
*/
struct csio_mbm_stats stats; /* Statistics */
};
#define csio_set_mb_intr_idx(_m, _i) ((_m)->intr_idx = (_i))
#define csio_get_mb_intr_idx(_m) ((_m)->intr_idx)
struct csio_iq_params;
struct csio_eq_params;
enum fw_retval csio_mb_fw_retval(struct csio_mb *);
/* MB helpers */
void csio_mb_hello(struct csio_hw *, struct csio_mb *, uint32_t,
uint32_t, uint32_t, enum csio_dev_master,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_process_hello_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, enum csio_dev_state *,
uint8_t *);
void csio_mb_bye(struct csio_hw *, struct csio_mb *, uint32_t,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_reset(struct csio_hw *, struct csio_mb *, uint32_t, int, int,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_params(struct csio_hw *, struct csio_mb *, uint32_t, unsigned int,
unsigned int, unsigned int, const u32 *, u32 *, bool,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_process_read_params_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, unsigned int , u32 *);
void csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
int reg);
void csio_mb_caps_config(struct csio_hw *, struct csio_mb *, uint32_t,
bool, bool, bool, bool,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_rss_glb_config(struct csio_hw *, struct csio_mb *,
uint32_t, uint8_t, unsigned int,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_pfvf(struct csio_hw *, struct csio_mb *, uint32_t,
unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int, unsigned int,
unsigned int, void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_port(struct csio_hw *, struct csio_mb *, uint32_t,
uint8_t, bool, uint32_t, uint16_t,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_process_read_port_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, uint16_t *);
void csio_mb_initialize(struct csio_hw *, struct csio_mb *, uint32_t,
void (*)(struct csio_hw *, struct csio_mb *));
void csio_mb_iq_alloc_write(struct csio_hw *, struct csio_mb *, void *,
uint32_t, struct csio_iq_params *,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_iq_alloc_write_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, struct csio_iq_params *);
void csio_mb_iq_free(struct csio_hw *, struct csio_mb *, void *,
uint32_t, struct csio_iq_params *,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_eq_ofld_alloc_write(struct csio_hw *, struct csio_mb *, void *,
uint32_t, struct csio_eq_params *,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *, struct csio_mb *,
enum fw_retval *, struct csio_eq_params *);
void csio_mb_eq_ofld_free(struct csio_hw *, struct csio_mb *, void *,
uint32_t , struct csio_eq_params *,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_read_res_info_init_mb(struct csio_hw *, struct csio_mb *,
uint32_t,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_write_fcoe_link_cond_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t, uint8_t, uint32_t, uint8_t, bool, uint32_t,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t, uint32_t , uint32_t , uint16_t,
uint8_t [8], uint8_t [8],
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_vnp_read_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t, uint32_t , uint32_t ,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_vnp_free_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t , uint32_t, uint32_t ,
void (*) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_read_fcf_init_mb(struct csio_lnode *, struct csio_mb *,
uint32_t, uint32_t, uint32_t,
void (*cbfn) (struct csio_hw *, struct csio_mb *));
void csio_fcoe_read_portparams_init_mb(struct csio_hw *hw,
struct csio_mb *mbp, uint32_t mb_tmo,
struct fw_fcoe_port_cmd_params *portparams,
void (*cbfn)(struct csio_hw *, struct csio_mb *));
void csio_mb_process_portparams_rsp(struct csio_hw *hw, struct csio_mb *mbp,
enum fw_retval *retval,
struct fw_fcoe_port_cmd_params *portparams,
struct fw_fcoe_port_stats *portstats);
/* MB module functions */
int csio_mbm_init(struct csio_mbm *, struct csio_hw *,
void (*)(uintptr_t));
void csio_mbm_exit(struct csio_mbm *);
void csio_mb_intr_enable(struct csio_hw *);
void csio_mb_intr_disable(struct csio_hw *);
int csio_mb_issue(struct csio_hw *, struct csio_mb *);
void csio_mb_completions(struct csio_hw *, struct list_head *);
int csio_mb_fwevt_handler(struct csio_hw *, __be64 *);
int csio_mb_isr_handler(struct csio_hw *);
struct csio_mb *csio_mb_tmo_handler(struct csio_hw *);
void csio_mb_cancel_all(struct csio_hw *, struct list_head *);
#endif /* ifndef __CSIO_MB_H__ */
This diff has been collapsed.
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_RNODE_H__
#define __CSIO_RNODE_H__
#include "csio_defs.h"
/* State machine events */
enum csio_rn_ev {
CSIO_RNFE_NONE = (uint32_t)0, /* None */
CSIO_RNFE_LOGGED_IN, /* [N/F]Port login
* complete.
*/
CSIO_RNFE_PRLI_DONE, /* PRLI completed */
CSIO_RNFE_PLOGI_RECV, /* Received PLOGI */
CSIO_RNFE_PRLI_RECV, /* Received PRLI */
CSIO_RNFE_LOGO_RECV, /* Received LOGO */
CSIO_RNFE_PRLO_RECV, /* Received PRLO */
CSIO_RNFE_DOWN, /* Rnode is down */
CSIO_RNFE_CLOSE, /* Close rnode */
CSIO_RNFE_NAME_MISSING, /* Rnode name missing
* in name server.
*/
CSIO_RNFE_MAX_EVENT,
};
/* rnode stats */
struct csio_rnode_stats {
uint32_t n_err; /* error */
uint32_t n_err_inval; /* invalid parameter */
uint32_t n_err_nomem; /* error nomem */
uint32_t n_evt_unexp; /* unexpected event */
uint32_t n_evt_drop; /* dropped event */
uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */
uint32_t n_lun_rst; /* Number of resets of
* LUNs under this
* target
*/
uint32_t n_lun_rst_fail; /* Number of LUN reset
* failures.
*/
uint32_t n_tgt_rst; /* Number of target resets */
uint32_t n_tgt_rst_fail; /* Number of target reset
* failures.
*/
};
/* Defines for rnode role */
#define CSIO_RNFR_INITIATOR 0x1
#define CSIO_RNFR_TARGET 0x2
#define CSIO_RNFR_FABRIC 0x4
#define CSIO_RNFR_NS 0x8
#define CSIO_RNFR_NPORT 0x10
struct csio_rnode {
struct csio_sm sm; /* State machine -
* should be the
* 1st member
*/
struct csio_lnode *lnp; /* Pointer to owning
* Lnode */
uint32_t flowid; /* Firmware ID */
struct list_head host_cmpl_q; /* SCSI IOs
* pending to completed
* to Mid-layer.
*/
/* FC identifiers for remote node */
uint32_t nport_id;
uint16_t fcp_flags; /* FCP Flags */
uint8_t cur_evt; /* Current event */
uint8_t prev_evt; /* Previous event */
uint32_t role; /* Fabric/Target/
* Initiator/NS
*/
struct fcoe_rdev_entry *rdev_entry; /* Rdev entry */
struct csio_service_parms rn_sparm;
/* FC transport attributes */
struct fc_rport *rport; /* FC transport rport */
uint32_t supp_classes; /* Supported FC classes */
uint32_t maxframe_size; /* Max Frame size */
uint32_t scsi_id; /* Transport given SCSI id */
struct csio_rnode_stats stats; /* Common rnode stats */
};
#define csio_rn_flowid(rn) ((rn)->flowid)
#define csio_rn_wwpn(rn) ((rn)->rn_sparm.wwpn)
#define csio_rn_wwnn(rn) ((rn)->rn_sparm.wwnn)
#define csio_rnode_to_lnode(rn) ((rn)->lnp)
int csio_is_rnode_ready(struct csio_rnode *rn);
void csio_rnode_state_to_str(struct csio_rnode *rn, int8_t *str);
struct csio_rnode *csio_rnode_lookup_portid(struct csio_lnode *, uint32_t);
struct csio_rnode *csio_confirm_rnode(struct csio_lnode *,
uint32_t, struct fcoe_rdev_entry *);
void csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt);
void csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn);
void csio_reg_rnode(struct csio_rnode *);
void csio_unreg_rnode(struct csio_rnode *);
void csio_rnode_devloss_handler(struct csio_rnode *);
#endif /* ifndef __CSIO_RNODE_H__ */
This diff has been collapsed.
/*
* This file is part of the Chelsio FCoE driver for Linux.
*
* Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CSIO_SCSI_H__
#define __CSIO_SCSI_H__
#include <linux/spinlock_types.h>
#include <linux/completion.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fcp.h>
#include "csio_defs.h"
#include "csio_wr.h"
extern struct scsi_host_template csio_fcoe_shost_template;
extern struct scsi_host_template csio_fcoe_shost_vport_template;
extern int csio_scsi_eqsize;
extern int csio_scsi_iqlen;
extern int csio_scsi_ioreqs;
extern uint32_t csio_max_scan_tmo;
extern uint32_t csio_delta_scan_tmo;
extern int csio_lun_qdepth;
/*
**************************** NOTE *******************************
* How do we calculate MAX FCoE SCSI SGEs? Here is the math:
* Max Egress WR size = 512 bytes
* One SCSI egress WR has the following fixed no of bytes:
* 48 (sizeof(struct fw_scsi_write[read]_wr)) - FW WR
* + 32 (sizeof(struct fc_fcp_cmnd)) - Immediate FCP_CMD
* ------
* 80
* ------
* That leaves us with 512 - 80 = 432 bytes for data SGE. Using
* struct ulptx_sgl header for the SGE consumes:
* - 4 bytes for cmnd_sge.
* - 12 bytes for the first SGL.
* That leaves us with 416 bytes for the remaining SGE pairs, which
* is 416 / 24 (sizeof(struct ulptx_sge_pair)) = 17 SGE pairs,
* or 34 SGEs. Adding the first SGE fetches us 35 SGEs.
*/
#define CSIO_SCSI_MAX_SGE 35
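Restating the arithmetic from the note above as a quick check (the byte counts are the ones quoted there, not re-derived from t4fw_api.h):
/*
 *	512 - (48 + 32)      = 432 bytes left for the data SGL
 *	432 - (4 + 12)       = 416 bytes for SGE pairs
 *	416 / 24             = 17 pairs -> 34 SGEs
 *	34 + 1 (first SGE)   = 35 = CSIO_SCSI_MAX_SGE
 */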
#define CSIO_SCSI_ABRT_TMO_MS 60000
#define CSIO_SCSI_LUNRST_TMO_MS 60000
#define CSIO_SCSI_TM_POLL_MS 2000 /* should be less than
* all TM timeouts.
*/
#define CSIO_SCSI_IQ_WRSZ 128
#define CSIO_SCSI_IQSIZE (csio_scsi_iqlen * CSIO_SCSI_IQ_WRSZ)
#define CSIO_MAX_SNS_LEN 128
#define CSIO_SCSI_RSP_LEN (FCP_RESP_WITH_EXT + 4 + CSIO_MAX_SNS_LEN)
/* Reference to scsi_cmnd */
#define csio_scsi_cmnd(req) ((req)->scratch1)
struct csio_scsi_stats {
uint64_t n_tot_success; /* Total number of good I/Os */
uint32_t n_rn_nr_error; /* No. of remote-node-not-
* ready errors
*/
uint32_t n_hw_nr_error; /* No. of hw-module-not-
* ready errors
*/
uint32_t n_dmamap_error; /* No. of DMA map errors */
uint32_t n_unsupp_sge_error; /* No. of too-many-SGEs
* errors.
*/
uint32_t n_no_req_error; /* No. of Out-of-ioreqs error */
uint32_t n_busy_error; /* No. of -EBUSY errors */
uint32_t n_hosterror; /* No. of FW_HOSTERROR I/O */
uint32_t n_rsperror; /* No. of response errors */
uint32_t n_autosense; /* No. of auto sense replies */
uint32_t n_ovflerror; /* No. of overflow errors */
uint32_t n_unflerror; /* No. of underflow errors */
uint32_t n_rdev_nr_error;/* No. of rdev not
* ready errors
*/
uint32_t n_rdev_lost_error;/* No. of rdev lost errors */
uint32_t n_rdev_logo_error;/* No. of rdev logo errors */
uint32_t n_link_down_error;/* No. of link down errors */
uint32_t n_no_xchg_error; /* No. of no-exchange errors */
uint32_t n_unknown_error;/* No. of unhandled errors */
uint32_t n_aborted; /* No. of aborted I/Os */
uint32_t n_abrt_timedout; /* No. of abort timedouts */
uint32_t n_abrt_fail; /* No. of abort failures */
uint32_t n_abrt_dups; /* No. of duplicate aborts */
uint32_t n_abrt_race_comp; /* No. of aborts that raced
* with completions.
*/
uint32_t n_abrt_busy_error;/* No. of abort failures
* due to -EBUSY.
*/
uint32_t n_closed; /* No. of closed I/Os */
uint32_t n_cls_busy_error; /* No. of close failures
* due to -EBUSY.
*/
uint32_t n_active; /* No. of IOs in active_q */
uint32_t n_tm_active; /* No. of TMs in active_q */
uint32_t n_wcbfn; /* No. of I/Os in worker
* cbfn q
*/
uint32_t n_free_ioreq; /* No. of freelist entries */
uint32_t n_free_ddp; /* No. of DDP freelist */
uint32_t n_unaligned; /* No. of unaligned SGLs */
uint32_t n_inval_cplop; /* No. invalid CPL op's in IQ */
uint32_t n_inval_scsiop; /* No. invalid scsi op's in IQ*/
};
struct csio_scsim {
struct csio_hw *hw; /* Pointer to HW module */
uint8_t max_sge; /* Max SGE */
uint8_t proto_cmd_len; /* Proto specific SCSI
* cmd length
*/
uint16_t proto_rsp_len; /* Proto specific SCSI
* response length
*/
spinlock_t freelist_lock; /* Lock for ioreq freelist */
struct list_head active_q; /* Outstanding SCSI I/Os */
struct list_head ioreq_freelist; /* Free list of ioreq's */
struct list_head ddp_freelist; /* DDP descriptor freelist */
struct csio_scsi_stats stats; /* This module's statistics */
};
/* State machine defines */
enum csio_scsi_ev {
CSIO_SCSIE_START_IO = 1, /* Start a regular SCSI IO */
CSIO_SCSIE_START_TM, /* Start a TM IO */
CSIO_SCSIE_COMPLETED, /* IO Completed */
CSIO_SCSIE_ABORT, /* Abort IO */
CSIO_SCSIE_ABORTED, /* IO Aborted */
CSIO_SCSIE_CLOSE, /* Close exchange */
CSIO_SCSIE_CLOSED, /* Exchange closed */
CSIO_SCSIE_DRVCLEANUP, /* Driver wants to manually
* cleanup this I/O.
*/
};
enum csio_scsi_lev {
CSIO_LEV_ALL = 1,
CSIO_LEV_LNODE,
CSIO_LEV_RNODE,
CSIO_LEV_LUN,
};
struct csio_scsi_level_data {
enum csio_scsi_lev level;
struct csio_rnode *rnode;
struct csio_lnode *lnode;
uint64_t oslun;
};
static inline struct csio_ioreq *
csio_get_scsi_ioreq(struct csio_scsim *scm)
{
struct csio_sm *req;
if (likely(!list_empty(&scm->ioreq_freelist))) {
req = list_first_entry(&scm->ioreq_freelist,
struct csio_sm, sm_list);
list_del_init(&req->sm_list);
CSIO_DEC_STATS(scm, n_free_ioreq);
return (struct csio_ioreq *)req;
} else
return NULL;
}
static inline void
csio_put_scsi_ioreq(struct csio_scsim *scm, struct csio_ioreq *ioreq)
{
list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
CSIO_INC_STATS(scm, n_free_ioreq);
}
static inline void
csio_put_scsi_ioreq_list(struct csio_scsim *scm, struct list_head *reqlist,
int n)
{
list_splice_init(reqlist, &scm->ioreq_freelist);
scm->stats.n_free_ioreq += n;
}
static inline struct csio_dma_buf *
csio_get_scsi_ddp(struct csio_scsim *scm)
{
struct csio_dma_buf *ddp;
if (likely(!list_empty(&scm->ddp_freelist))) {
ddp = list_first_entry(&scm->ddp_freelist,
struct csio_dma_buf, list);
list_del_init(&ddp->list);
CSIO_DEC_STATS(scm, n_free_ddp);
return ddp;
} else
return NULL;
}
static inline void
csio_put_scsi_ddp(struct csio_scsim *scm, struct csio_dma_buf *ddp)
{
list_add_tail(&ddp->list, &scm->ddp_freelist);
CSIO_INC_STATS(scm, n_free_ddp);
}
static inline void
csio_put_scsi_ddp_list(struct csio_scsim *scm, struct list_head *reqlist,
int n)
{
list_splice_tail_init(reqlist, &scm->ddp_freelist);
scm->stats.n_free_ddp += n;
}
static inline void
csio_scsi_completed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_COMPLETED);
if (csio_list_deleted(&ioreq->sm.sm_list))
list_add_tail(&ioreq->sm.sm_list, cbfn_q);
}
static inline void
csio_scsi_aborted(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORTED);
list_add_tail(&ioreq->sm.sm_list, cbfn_q);
}
static inline void
csio_scsi_closed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSED);
list_add_tail(&ioreq->sm.sm_list, cbfn_q);
}
static inline void
csio_scsi_drvcleanup(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_DRVCLEANUP);
}
/*
* csio_scsi_start_io - Kick starts the IO SM.
* @req: io request SM.
*
* needs to be called with lock held.
*/
static inline int
csio_scsi_start_io(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_START_IO);
return ioreq->drv_status;
}
/*
* csio_scsi_start_tm - Kicks off the Task management IO SM.
* @req: io request SM.
*
* needs to be called with lock held.
*/
static inline int
csio_scsi_start_tm(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_START_TM);
return ioreq->drv_status;
}
/*
* csio_scsi_abort - Abort an IO request
* @req: io request SM.
*
* needs to be called with lock held.
*/
static inline int
csio_scsi_abort(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORT);
return ioreq->drv_status;
}
/*
* csio_scsi_close - Close an IO request
* @req: io request SM.
*
* needs to be called with lock held.
*/
static inline int
csio_scsi_close(struct csio_ioreq *ioreq)
{
csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSE);
return ioreq->drv_status;
}
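An illustrative call pattern for the kick-start helpers above (a sketch only; the assumption that the required lock is hw->lock, taken with spin_lock_irqsave, follows the completion path shown earlier in this patch):
/*
 *	spin_lock_irqsave(&hw->lock, flags);
 *	rv = csio_scsi_start_io(ioreq);
 *	spin_unlock_irqrestore(&hw->lock, flags);
 *	if (rv != 0)
 *		// fail the request back to the caller
 */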
void csio_scsi_cleanup_io_q(struct csio_scsim *, struct list_head *);
int csio_scsim_cleanup_io(struct csio_scsim *, bool abort);
int csio_scsim_cleanup_io_lnode(struct csio_scsim *,
struct csio_lnode *);
struct csio_ioreq *csio_scsi_cmpl_handler(struct csio_hw *, void *, uint32_t,
struct csio_fl_dma_buf *,
void *, uint8_t **);
int csio_scsi_qconfig(struct csio_hw *);
int csio_scsim_init(struct csio_scsim *, struct csio_hw *);
void csio_scsim_exit(struct csio_scsim *);
#endif /* __CSIO_SCSI_H__ */
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.