Commit 0fbc81b3 authored by Hariprasad Shenai, committed by David S. Miller

chcr/cxgb4i/cxgbit/RDMA/cxgb4: Allocate resources dynamically for all cxgb4 ULD's

Allocate resources dynamically for cxgb4's upper-layer drivers (ULDs)
such as cxgbit, iw_cxgb4 and cxgb4i. Resources are allocated when a ULD
registers with the cxgb4 driver and freed when it unregisters. All the
queues and the interrupts for them are allocated during ULD probe only
and freed during remove.
Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent e8bc8f9a
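For orientation, a minimal sketch (not part of the commit) of how a ULD registers under the consolidated scheme: queue needs are described in a single cxgb4_uld_info, and cxgb4 allocates the Rx queues and MSI-X vectors at registration and frees them at unregistration. The my_uld_* names are hypothetical placeholders; the real instances for chcr, iw_cxgb4, cxgb4i and cxgbit appear in the hunks below.

    /* Sketch only: a ULD registering under the consolidated API.
     * Field values mirror the cxgb4i/cxgbit hunks below; the
     * my_uld_* callbacks are hypothetical.
     */
    static const struct cxgb4_uld_info my_uld_info = {
        .name         = "my_uld",
        .nrxq         = MAX_ULD_QSETS, /* upper bound; cfg_queues_uld() clamps it */
        .rxq_size     = 1024,
        .ciq          = false,         /* no RDMA concentrator IQs needed */
        .lro          = false,         /* no LRO flush handler on the rx path */
        .add          = my_uld_add,
        .rx_handler   = my_uld_rx_handler,
        .state_change = my_uld_state_change,
    };

    /* Queues and interrupts are allocated inside this call during ULD probe... */
    err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);

    /* ...and freed again on remove. */
    cxgb4_unregister_uld(CXGB4_ULD_ISCSI);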
@@ -39,12 +39,10 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
     [CPL_FW6_PLD] = cpl_fw6_pld_handler,
 };

-static struct cxgb4_pci_uld_info chcr_uld_info = {
+static struct cxgb4_uld_info chcr_uld_info = {
     .name = DRV_MODULE_NAME,
-    .nrxq = 4,
+    .nrxq = MAX_ULD_QSETS,
     .rxq_size = 1024,
-    .nciq = 0,
-    .ciq_size = 0,
     .add = chcr_uld_add,
     .state_change = chcr_uld_state_change,
     .rx_handler = chcr_uld_rx_handler,
@@ -205,7 +203,7 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)

 static int __init chcr_crypto_init(void)
 {
-    if (cxgb4_register_pci_uld(CXGB4_PCI_ULD1, &chcr_uld_info)) {
+    if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
         pr_err("ULD register fail: No chcr crypto support in cxgb4");
         return -1;
     }
@@ -228,7 +226,7 @@ static void __exit chcr_crypto_exit(void)
         kfree(u_ctx);
     }
     mutex_unlock(&dev_mutex);
-    cxgb4_unregister_pci_uld(CXGB4_PCI_ULD1);
+    cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
 }

 module_init(chcr_crypto_init);
...
@@ -1475,6 +1475,10 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
 static struct cxgb4_uld_info c4iw_uld_info = {
     .name = DRV_NAME,
+    .nrxq = MAX_ULD_QSETS,
+    .rxq_size = 511,
+    .ciq = true,
+    .lro = false,
     .add = c4iw_uld_add,
     .rx_handler = c4iw_uld_rx_handler,
     .state_change = c4iw_uld_state_change,
...
@@ -437,11 +437,6 @@ enum {
     MAX_ETH_QSETS = 32,        /* # of Ethernet Tx/Rx queue sets */
     MAX_OFLD_QSETS = 16,       /* # of offload Tx, iscsi Rx queue sets */
     MAX_CTRL_QUEUES = NCHAN,   /* # of control Tx queues */
-    MAX_RDMA_QUEUES = NCHAN,   /* # of streaming RDMA Rx queues */
-    MAX_RDMA_CIQS = 32,        /* # of RDMA concentrator IQs */
-    /* # of streaming iSCSIT Rx queues */
-    MAX_ISCSIT_QUEUES = MAX_OFLD_QSETS,
 };

 enum {
@@ -458,8 +453,7 @@ enum {
 enum {
     INGQ_EXTRAS = 2,           /* firmware event queue and */
                                /* forwarded interrupts */
-    MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES +
-               MAX_RDMA_CIQS + MAX_ISCSIT_QUEUES + INGQ_EXTRAS,
+    MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
 };

 struct adapter;
@@ -704,10 +698,6 @@ struct sge {
     struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
     struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
-    struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS];
-    struct sge_ofld_rxq iscsitrxq[MAX_ISCSIT_QUEUES];
-    struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
-    struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
     struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
     struct sge_uld_rxq_info **uld_rxq_info;
@@ -717,15 +707,8 @@ struct sge {
     u16 max_ethqsets;          /* # of available Ethernet queue sets */
     u16 ethqsets;              /* # of active Ethernet queue sets */
     u16 ethtxq_rover;          /* Tx queue to clean up next */
-    u16 iscsiqsets;            /* # of active iSCSI queue sets */
-    u16 niscsitq;              /* # of available iSCST Rx queues */
-    u16 rdmaqs;                /* # of available RDMA Rx queues */
-    u16 rdmaciqs;              /* # of available RDMA concentrator IQs */
+    u16 ofldqsets;             /* # of active ofld queue sets */
     u16 nqs_per_uld;           /* # of Rx queues per ULD */
-    u16 iscsi_rxq[MAX_OFLD_QSETS];
-    u16 iscsit_rxq[MAX_ISCSIT_QUEUES];
-    u16 rdma_rxq[MAX_RDMA_QUEUES];
-    u16 rdma_ciq[MAX_RDMA_CIQS];
     u16 timer_val[SGE_NTIMERS];
     u8 counter_val[SGE_NCOUNTERS];
     u32 fl_pg_order;           /* large page allocation size */
@@ -749,10 +732,7 @@ struct sge {
 };

 #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
-#define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++)
-#define for_each_iscsitrxq(sge, i) for (i = 0; i < (sge)->niscsitq; i++)
-#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
-#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
+#define for_each_ofldtxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)

 struct l2t_data;
@@ -786,6 +766,7 @@ struct uld_msix_bmap {
 struct uld_msix_info {
     unsigned short vec;
     char desc[IFNAMSIZ + 10];
+    unsigned int idx;
 };

 struct vf_info {
@@ -818,7 +799,7 @@ struct adapter {
     } msix_info[MAX_INGQ + 1];
     struct uld_msix_info *msix_info_ulds; /* msix info for uld's */
     struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */
-    unsigned int msi_idx;
+    int msi_idx;

     struct doorbell_stats db_stats;
     struct sge sge;
@@ -836,9 +817,10 @@ struct adapter {
     unsigned int clipt_start;
     unsigned int clipt_end;
     struct clip_tbl *clipt;
-    struct cxgb4_pci_uld_info *uld;
+    struct cxgb4_uld_info *uld;
     void *uld_handle[CXGB4_ULD_MAX];
     unsigned int num_uld;
+    unsigned int num_ofld_uld;
     struct list_head list_node;
     struct list_head rcu_node;
     struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
@@ -858,6 +840,8 @@ struct adapter {
 #define T4_OS_LOG_MBOX_CMDS 256
     struct mbox_cmd_log *mbox_log;

+    struct mutex uld_mutex;
+
     struct dentry *debugfs_root;
     bool use_bd;    /* Use SGE Back Door intfc for reading SGE Contexts */
     bool trace_rss; /* 1 implies that different RSS flit per filter is
@@ -1051,6 +1035,11 @@ static inline int is_pci_uld(const struct adapter *adap)
     return adap->params.crypto;
 }

+static inline int is_uld(const struct adapter *adap)
+{
+    return (adap->params.offload || adap->params.crypto);
+}
+
 static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
 {
     return readl(adap->regs + reg_addr);
@@ -1277,6 +1266,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
                           struct net_device *dev, unsigned int iqid,
                           unsigned int cmplqid);
+int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
+                        unsigned int cmplqid);
 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
                           struct net_device *dev, unsigned int iqid);
 irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
@@ -1635,7 +1626,9 @@ void t4_idma_monitor(struct adapter *adapter,
                      int hz, int ticks);
 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
                       unsigned int naddr, u8 *addr);
-void uld_mem_free(struct adapter *adap);
-int uld_mem_alloc(struct adapter *adap);
+void t4_uld_mem_free(struct adapter *adap);
+int t4_uld_mem_alloc(struct adapter *adap);
+void t4_uld_clean_up(struct adapter *adap);
+void t4_register_netevent_notifier(void);
 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
 #endif /* __CXGB4_H__ */
@@ -2432,17 +2432,11 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
 {
     struct adapter *adap = seq->private;
     int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
-    int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4);
-    int iscsit_entries = DIV_ROUND_UP(adap->sge.niscsitq, 4);
-    int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
-    int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
+    int ofld_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
     int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
     int i, r = (uintptr_t)v - 1;
-    int iscsi_idx = r - eth_entries;
-    int iscsit_idx = iscsi_idx - iscsi_entries;
-    int rdma_idx = iscsit_idx - iscsit_entries;
-    int ciq_idx = rdma_idx - rdma_entries;
-    int ctrl_idx = ciq_idx - ciq_entries;
+    int ofld_idx = r - eth_entries;
+    int ctrl_idx = ofld_idx - ofld_entries;
     int fq_idx = ctrl_idx - ctrl_entries;

     if (r)
@@ -2518,119 +2512,17 @@ do { \
         RL("FLLow:", fl.low);
         RL("FLStarving:", fl.starving);

-    } else if (iscsi_idx < iscsi_entries) {
-        const struct sge_ofld_rxq *rx =
-            &adap->sge.iscsirxq[iscsi_idx * 4];
+    } else if (ofld_idx < ofld_entries) {
         const struct sge_ofld_txq *tx =
-            &adap->sge.ofldtxq[iscsi_idx * 4];
-        int n = min(4, adap->sge.iscsiqsets - 4 * iscsi_idx);
+            &adap->sge.ofldtxq[ofld_idx * 4];
+        int n = min(4, adap->sge.ofldqsets - 4 * ofld_idx);

-        S("QType:", "iSCSI");
+        S("QType:", "OFLD-Txq");
         T("TxQ ID:", q.cntxt_id);
         T("TxQ size:", q.size);
         T("TxQ inuse:", q.in_use);
         T("TxQ CIDX:", q.cidx);
         T("TxQ PIDX:", q.pidx);
-        R("RspQ ID:", rspq.abs_id);
-        R("RspQ size:", rspq.size);
-        R("RspQE size:", rspq.iqe_len);
-        R("RspQ CIDX:", rspq.cidx);
-        R("RspQ Gen:", rspq.gen);
-        S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
-        S3("u", "Intr pktcnt:",
-           adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
-        R("FL ID:", fl.cntxt_id);
-        R("FL size:", fl.size - 8);
-        R("FL pend:", fl.pend_cred);
-        R("FL avail:", fl.avail);
-        R("FL PIDX:", fl.pidx);
-        R("FL CIDX:", fl.cidx);
-        RL("RxPackets:", stats.pkts);
-        RL("RxImmPkts:", stats.imm);
-        RL("RxNoMem:", stats.nomem);
-        RL("FLAllocErr:", fl.alloc_failed);
-        RL("FLLrgAlcErr:", fl.large_alloc_failed);
-        RL("FLMapErr:", fl.mapping_err);
-        RL("FLLow:", fl.low);
-        RL("FLStarving:", fl.starving);
-
-    } else if (iscsit_idx < iscsit_entries) {
-        const struct sge_ofld_rxq *rx =
-            &adap->sge.iscsitrxq[iscsit_idx * 4];
-        int n = min(4, adap->sge.niscsitq - 4 * iscsit_idx);
-
-        S("QType:", "iSCSIT");
-        R("RspQ ID:", rspq.abs_id);
-        R("RspQ size:", rspq.size);
-        R("RspQE size:", rspq.iqe_len);
-        R("RspQ CIDX:", rspq.cidx);
-        R("RspQ Gen:", rspq.gen);
-        S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
-        S3("u", "Intr pktcnt:",
-           adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
-        R("FL ID:", fl.cntxt_id);
-        R("FL size:", fl.size - 8);
-        R("FL pend:", fl.pend_cred);
-        R("FL avail:", fl.avail);
-        R("FL PIDX:", fl.pidx);
-        R("FL CIDX:", fl.cidx);
-        RL("RxPackets:", stats.pkts);
-        RL("RxImmPkts:", stats.imm);
-        RL("RxNoMem:", stats.nomem);
-        RL("FLAllocErr:", fl.alloc_failed);
-        RL("FLLrgAlcErr:", fl.large_alloc_failed);
-        RL("FLMapErr:", fl.mapping_err);
-        RL("FLLow:", fl.low);
-        RL("FLStarving:", fl.starving);
-
-    } else if (rdma_idx < rdma_entries) {
-        const struct sge_ofld_rxq *rx =
-            &adap->sge.rdmarxq[rdma_idx * 4];
-        int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx);
-
-        S("QType:", "RDMA-CPL");
-        S("Interface:",
-          rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
-        R("RspQ ID:", rspq.abs_id);
-        R("RspQ size:", rspq.size);
-        R("RspQE size:", rspq.iqe_len);
-        R("RspQ CIDX:", rspq.cidx);
-        R("RspQ Gen:", rspq.gen);
-        S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
-        S3("u", "Intr pktcnt:",
-           adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
-        R("FL ID:", fl.cntxt_id);
-        R("FL size:", fl.size - 8);
-        R("FL pend:", fl.pend_cred);
-        R("FL avail:", fl.avail);
-        R("FL PIDX:", fl.pidx);
-        R("FL CIDX:", fl.cidx);
-        RL("RxPackets:", stats.pkts);
-        RL("RxImmPkts:", stats.imm);
-        RL("RxNoMem:", stats.nomem);
-        RL("FLAllocErr:", fl.alloc_failed);
-        RL("FLLrgAlcErr:", fl.large_alloc_failed);
-        RL("FLMapErr:", fl.mapping_err);
-        RL("FLLow:", fl.low);
-        RL("FLStarving:", fl.starving);
-
-    } else if (ciq_idx < ciq_entries) {
-        const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
-        int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
-
-        S("QType:", "RDMA-CIQ");
-        S("Interface:",
-          rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
-        R("RspQ ID:", rspq.abs_id);
-        R("RspQ size:", rspq.size);
-        R("RspQE size:", rspq.iqe_len);
-        R("RspQ CIDX:", rspq.cidx);
-        R("RspQ Gen:", rspq.gen);
-        S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
-        S3("u", "Intr pktcnt:",
-           adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
-        RL("RxAN:", stats.an);
-        RL("RxNoMem:", stats.nomem);
-
     } else if (ctrl_idx < ctrl_entries) {
         const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
@@ -2672,10 +2564,7 @@ do { \

 static int sge_queue_entries(const struct adapter *adap)
 {
     return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
-           DIV_ROUND_UP(adap->sge.iscsiqsets, 4) +
-           DIV_ROUND_UP(adap->sge.niscsitq, 4) +
-           DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
-           DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
+           DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
            DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
 }
...
@@ -82,6 +82,24 @@ static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
     spin_unlock_irqrestore(&bmap->lock, flags);
 }

+/* Flush the aggregated lro sessions */
+static void uldrx_flush_handler(struct sge_rspq *q)
+{
+    struct adapter *adap = q->adap;
+
+    if (adap->uld[q->uld].lro_flush)
+        adap->uld[q->uld].lro_flush(&q->lro_mgr);
+}
+
+/**
+ *    uldrx_handler - response queue handler for ULD queues
+ *    @q: the response queue that received the packet
+ *    @rsp: the response queue descriptor holding the offload message
+ *    @gl: the gather list of packet fragments
+ *
+ *    Deliver an ingress offload packet to a ULD. All processing is done by
+ *    the ULD, we just maintain statistics.
+ */
 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
 {
@@ -124,8 +142,8 @@ static int alloc_uld_rxqs(struct adapter *adap,
     struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
     unsigned short *ids = rxq_info->rspq_id + offset;
     unsigned int per_chan = nq / adap->params.nports;
-    unsigned int msi_idx, bmap_idx;
-    int i, err;
+    unsigned int bmap_idx = 0;
+    int i, err, msi_idx;

     if (adap->flags & USING_MSIX)
         msi_idx = 1;
@@ -135,14 +153,14 @@ static int alloc_uld_rxqs(struct adapter *adap,
     for (i = 0; i < nq; i++, q++) {
         if (msi_idx >= 0) {
             bmap_idx = get_msix_idx_from_bmap(adap);
-            adap->msi_idx++;
+            msi_idx = adap->msix_info_ulds[bmap_idx].idx;
         }
         err = t4_sge_alloc_rxq(adap, &q->rspq, false,
                                adap->port[i / per_chan],
-                               adap->msi_idx,
+                               msi_idx,
                                q->fl.size ? &q->fl : NULL,
                                uldrx_handler,
-                               NULL,
+                               lro ? uldrx_flush_handler : NULL,
                                0);
         if (err)
             goto freeout;
@@ -159,7 +177,6 @@ static int alloc_uld_rxqs(struct adapter *adap,
         if (q->rspq.desc)
             free_rspq_fl(adap, &q->rspq,
                          q->fl.size ? &q->fl : NULL);
-        adap->msi_idx--;
     }

     /* We need to free rxq also in case of ciq allocation failure */
@@ -169,7 +186,6 @@ static int alloc_uld_rxqs(struct adapter *adap,
             if (q->rspq.desc)
                 free_rspq_fl(adap, &q->rspq,
                              q->fl.size ? &q->fl : NULL);
-            adap->msi_idx--;
         }
     }
     return err;
@@ -178,17 +194,38 @@ static int alloc_uld_rxqs(struct adapter *adap,
 int setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
 {
     struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+    int i, ret = 0;

     if (adap->flags & USING_MSIX) {
-        rxq_info->msix_tbl = kzalloc(rxq_info->nrxq + rxq_info->nciq,
+        rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
+                                     sizeof(unsigned short),
                                      GFP_KERNEL);
         if (!rxq_info->msix_tbl)
             return -ENOMEM;
     }

-    return !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
+    ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
             !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
                             rxq_info->nrxq, lro));
+
+    /* Tell uP to route control queue completions to rdma rspq */
+    if (adap->flags & FULL_INIT_DONE &&
+        !ret && uld_type == CXGB4_ULD_RDMA) {
+        struct sge *s = &adap->sge;
+        unsigned int cmplqid;
+        u32 param, cmdop;
+
+        cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
+        for_each_port(adap, i) {
+            cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
+            param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+                     FW_PARAMS_PARAM_X_V(cmdop) |
+                     FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
+            ret = t4_set_params(adap, adap->mbox, adap->pf,
+                                0, 1, &param, &cmplqid);
+        }
+    }
+    return ret;
 }

 static void t4_free_uld_rxqs(struct adapter *adap, int n,
@@ -198,7 +235,6 @@ static void t4_free_uld_rxqs(struct adapter *adap, int n,
         if (q->rspq.desc)
             free_rspq_fl(adap, &q->rspq,
                          q->fl.size ? &q->fl : NULL);
-        adap->msi_idx--;
     }
 }

@@ -206,6 +242,21 @@ void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
 {
     struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

+    if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
+        struct sge *s = &adap->sge;
+        u32 param, cmdop, cmplqid = 0;
+        int i;
+
+        cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
+        for_each_port(adap, i) {
+            param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+                     FW_PARAMS_PARAM_X_V(cmdop) |
+                     FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
+            t4_set_params(adap, adap->mbox, adap->pf,
+                          0, 1, &param, &cmplqid);
+        }
+    }
+
     if (rxq_info->nciq)
         t4_free_uld_rxqs(adap, rxq_info->nciq,
                          rxq_info->uldrxq + rxq_info->nrxq);
@@ -215,26 +266,38 @@ void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
 }

 int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
-                   const struct cxgb4_pci_uld_info *uld_info)
+                   const struct cxgb4_uld_info *uld_info)
 {
     struct sge *s = &adap->sge;
     struct sge_uld_rxq_info *rxq_info;
-    int i, nrxq;
+    int i, nrxq, ciq_size;

     rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
     if (!rxq_info)
         return -ENOMEM;

-    if (uld_info->nrxq > s->nqs_per_uld)
-        rxq_info->nrxq = s->nqs_per_uld;
-    else
-        rxq_info->nrxq = uld_info->nrxq;
-    if (!uld_info->nciq)
+    if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
+        i = s->nqs_per_uld;
+        rxq_info->nrxq = roundup(i, adap->params.nports);
+    } else {
+        i = min_t(int, uld_info->nrxq,
+                  num_online_cpus());
+        rxq_info->nrxq = roundup(i, adap->params.nports);
+    }
+    if (!uld_info->ciq) {
         rxq_info->nciq = 0;
-    else if (uld_info->nciq && uld_info->nciq > s->nqs_per_uld)
-        rxq_info->nciq = s->nqs_per_uld;
-    else
-        rxq_info->nciq = uld_info->nciq;
+    } else {
+        if (adap->flags & USING_MSIX)
+            rxq_info->nciq = min_t(int, s->nqs_per_uld,
+                                   num_online_cpus());
+        else
+            rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
+                                   num_online_cpus());
+        rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
+                          adap->params.nports);
+        rxq_info->nciq = max_t(int, rxq_info->nciq,
+                               adap->params.nports);
+    }

     nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
     rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
@@ -259,12 +322,17 @@ int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
         r->fl.size = 72;
     }

+    ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
+    if (ciq_size > SGE_MAX_IQ_SIZE) {
+        dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
+        ciq_size = SGE_MAX_IQ_SIZE;
+    }
+
     for (i = rxq_info->nrxq; i < nrxq; i++) {
         struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

-        init_rspq(adap, &r->rspq, 5, 1, uld_info->ciq_size, 64);
+        init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
         r->rspq.uld = uld_type;
+        r->fl.size = 72;
     }

     memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
@@ -285,7 +353,8 @@ void free_queues_uld(struct adapter *adap, unsigned int uld_type)
 int request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
 {
     struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
-    int idx, bmap_idx, err = 0;
+    int err = 0;
+    unsigned int idx, bmap_idx;

     for_each_uldrxq(rxq_info, idx) {
         bmap_idx = rxq_info->msix_tbl[idx];
@@ -310,10 +379,10 @@ int request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
 void free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
 {
     struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
-    int idx;
+    unsigned int idx, bmap_idx;

     for_each_uldrxq(rxq_info, idx) {
-        unsigned int bmap_idx = rxq_info->msix_tbl[idx];
+        bmap_idx = rxq_info->msix_tbl[idx];

         free_msix_idx_in_bmap(adap, bmap_idx);
         free_irq(adap->msix_info_ulds[bmap_idx].vec,
@@ -325,10 +394,10 @@ void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
 {
     struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
     int n = sizeof(adap->msix_info_ulds[0].desc);
-    int idx;
+    unsigned int idx, bmap_idx;

     for_each_uldrxq(rxq_info, idx) {
-        unsigned int bmap_idx = rxq_info->msix_tbl[idx];
+        bmap_idx = rxq_info->msix_tbl[idx];

         snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
                  adap->port[0]->name, rxq_info->name, idx);
@@ -390,15 +459,15 @@ static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
     lli->nciq = rxq_info->nciq;
 }

-int uld_mem_alloc(struct adapter *adap)
+int t4_uld_mem_alloc(struct adapter *adap)
 {
     struct sge *s = &adap->sge;

-    adap->uld = kcalloc(adap->num_uld, sizeof(*adap->uld), GFP_KERNEL);
+    adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
     if (!adap->uld)
         return -ENOMEM;

-    s->uld_rxq_info = kzalloc(adap->num_uld *
+    s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
                               sizeof(struct sge_uld_rxq_info *),
                               GFP_KERNEL);
     if (!s->uld_rxq_info)
@@ -410,7 +479,7 @@ int uld_mem_alloc(struct adapter *adap)
     return -ENOMEM;
 }

-void uld_mem_free(struct adapter *adap)
+void t4_uld_mem_free(struct adapter *adap)
 {
     struct sge *s = &adap->sge;

@@ -418,6 +487,26 @@ void uld_mem_free(struct adapter *adap)
     kfree(adap->uld);
 }

+void t4_uld_clean_up(struct adapter *adap)
+{
+    struct sge_uld_rxq_info *rxq_info;
+    unsigned int i;
+
+    if (!adap->uld)
+        return;
+    for (i = 0; i < CXGB4_ULD_MAX; i++) {
+        if (!adap->uld[i].handle)
+            continue;
+        rxq_info = adap->sge.uld_rxq_info[i];
+        if (adap->flags & FULL_INIT_DONE)
+            quiesce_rx_uld(adap, i);
+        if (adap->flags & USING_MSIX)
+            free_msix_queue_irqs_uld(adap, i);
+        free_sge_queues_uld(adap, i);
+        free_queues_uld(adap, i);
+    }
+}
+
 static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
 {
     int i;
@@ -429,10 +518,15 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
     lld->ports = adap->port;
     lld->vr = &adap->vres;
     lld->mtus = adap->params.mtus;
-    lld->ntxq = adap->sge.iscsiqsets;
+    lld->ntxq = adap->sge.ofldqsets;
     lld->nchan = adap->params.nports;
     lld->nports = adap->params.nports;
     lld->wr_cred = adap->params.ofldq_wr_cred;
+    lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
+    lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
+    lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
+    lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
+    lld->iscsi_ppm = &adap->iscsi_ppm;
     lld->adapter_type = adap->params.chip;
     lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
     lld->udb_density = 1 << adap->params.sge.eq_qpp;
@@ -472,23 +566,37 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
     }

     adap->uld[uld].handle = handle;
+    t4_register_netevent_notifier();

     if (adap->flags & FULL_INIT_DONE)
         adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
 }

-int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
-                           struct cxgb4_pci_uld_info *p)
+/**
+ *    cxgb4_register_uld - register an upper-layer driver
+ *    @type: the ULD type
+ *    @p: the ULD methods
+ *
+ *    Registers an upper-layer driver with this driver and notifies the ULD
+ *    about any presently available devices that support its type. Returns
+ *    %-EBUSY if a ULD of the same type is already registered.
+ */
+int cxgb4_register_uld(enum cxgb4_uld type,
+                       const struct cxgb4_uld_info *p)
 {
     int ret = 0;
+    unsigned int adap_idx = 0;
     struct adapter *adap;

-    if (type >= CXGB4_PCI_ULD_MAX)
+    if (type >= CXGB4_ULD_MAX)
         return -EINVAL;

     mutex_lock(&uld_mutex);
     list_for_each_entry(adap, &adapter_list, list_node) {
-        if (!is_pci_uld(adap))
+        if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+            (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+            continue;
+        if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
             continue;
         ret = cfg_queues_uld(adap, type, p);
         if (ret)
@@ -510,11 +618,14 @@ int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
         }
         adap->uld[type] = *p;
         uld_attach(adap, type);
+        adap_idx++;
     }
     mutex_unlock(&uld_mutex);
     return 0;

 free_irq:
+    if (adap->flags & FULL_INIT_DONE)
+        quiesce_rx_uld(adap, type);
     if (adap->flags & USING_MSIX)
         free_msix_queue_irqs_uld(adap, type);
 free_rxq:
@@ -522,21 +633,49 @@ int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
 free_queues:
     free_queues_uld(adap, type);
 out:
+    list_for_each_entry(adap, &adapter_list, list_node) {
+        if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+            (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+            continue;
+        if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+            continue;
+        if (!adap_idx)
+            break;
+        adap->uld[type].handle = NULL;
+        adap->uld[type].add = NULL;
+        if (adap->flags & FULL_INIT_DONE)
+            quiesce_rx_uld(adap, type);
+        if (adap->flags & USING_MSIX)
+            free_msix_queue_irqs_uld(adap, type);
+        free_sge_queues_uld(adap, type);
+        free_queues_uld(adap, type);
+        adap_idx--;
+    }
     mutex_unlock(&uld_mutex);
     return ret;
 }
-EXPORT_SYMBOL(cxgb4_register_pci_uld);
+EXPORT_SYMBOL(cxgb4_register_uld);

-int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type)
+/**
+ *    cxgb4_unregister_uld - unregister an upper-layer driver
+ *    @type: the ULD type
+ *
+ *    Unregisters an existing upper-layer driver.
+ */
+int cxgb4_unregister_uld(enum cxgb4_uld type)
 {
     struct adapter *adap;

-    if (type >= CXGB4_PCI_ULD_MAX)
+    if (type >= CXGB4_ULD_MAX)
         return -EINVAL;

     mutex_lock(&uld_mutex);
     list_for_each_entry(adap, &adapter_list, list_node) {
-        if (!is_pci_uld(adap))
+        if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+            (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+            continue;
+        if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
             continue;
         adap->uld[type].handle = NULL;
         adap->uld[type].add = NULL;
@@ -551,4 +690,4 @@ int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type)
     return 0;
 }
-EXPORT_SYMBOL(cxgb4_unregister_pci_uld);
+EXPORT_SYMBOL(cxgb4_unregister_uld);
@@ -42,6 +42,8 @@
 #include <linux/atomic.h>
 #include "cxgb4.h"

+#define MAX_ULD_QSETS 16
+
 /* CPL message priority levels */
 enum {
     CPL_PRIORITY_DATA     = 0,  /* data messages */
@@ -189,9 +191,11 @@ static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
 }

 enum cxgb4_uld {
+    CXGB4_ULD_INIT,
     CXGB4_ULD_RDMA,
     CXGB4_ULD_ISCSI,
     CXGB4_ULD_ISCSIT,
+    CXGB4_ULD_CRYPTO,
     CXGB4_ULD_MAX
 };

@@ -284,31 +288,11 @@ struct cxgb4_lld_info {

 struct cxgb4_uld_info {
     const char *name;
-    void *(*add)(const struct cxgb4_lld_info *p);
-    int (*rx_handler)(void *handle, const __be64 *rsp,
-                      const struct pkt_gl *gl);
-    int (*state_change)(void *handle, enum cxgb4_state new_state);
-    int (*control)(void *handle, enum cxgb4_control control, ...);
-    int (*lro_rx_handler)(void *handle, const __be64 *rsp,
-                          const struct pkt_gl *gl,
-                          struct t4_lro_mgr *lro_mgr,
-                          struct napi_struct *napi);
-    void (*lro_flush)(struct t4_lro_mgr *);
-};
-
-enum cxgb4_pci_uld {
-    CXGB4_PCI_ULD1,
-    CXGB4_PCI_ULD_MAX
-};
-
-struct cxgb4_pci_uld_info {
-    const char *name;
-    bool lro;
     void *handle;
     unsigned int nrxq;
-    unsigned int nciq;
     unsigned int rxq_size;
-    unsigned int ciq_size;
+    bool ciq;
+    bool lro;
     void *(*add)(const struct cxgb4_lld_info *p);
     int (*rx_handler)(void *handle, const __be64 *rsp,
                       const struct pkt_gl *gl);
@@ -323,9 +307,6 @@ struct cxgb4_pci_uld_info {

 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
 int cxgb4_unregister_uld(enum cxgb4_uld type);
-int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
-                           struct cxgb4_pci_uld_info *p);
-int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type);
 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
 unsigned int cxgb4_port_chan(const struct net_device *dev);
...
@@ -2860,6 +2860,18 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
     return 0;
 }

+int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
+                        unsigned int cmplqid)
+{
+    u32 param, val;
+
+    param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+             FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
+             FW_PARAMS_PARAM_YZ_V(eqid));
+    val = cmplqid;
+    return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
+}
+
 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
                           struct net_device *dev, unsigned int iqid)
 {
@@ -3014,12 +3026,6 @@ void t4_free_sge_resources(struct adapter *adap)
         }
     }

-    /* clean up RDMA and iSCSI Rx queues */
-    t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq);
-    t4_free_ofld_rxqs(adap, adap->sge.niscsitq, adap->sge.iscsitrxq);
-    t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
-    t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
-
     /* clean up offload Tx queues */
     for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
         struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
...
@@ -84,6 +84,9 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *);
 static const struct cxgb4_uld_info cxgb4i_uld_info = {
     .name = DRV_MODULE_NAME,
+    .nrxq = MAX_ULD_QSETS,
+    .rxq_size = 1024,
+    .lro = false,
     .add = t4_uld_add,
     .rx_handler = t4_uld_rx_handler,
     .state_change = t4_uld_state_change,
...
@@ -652,6 +652,9 @@ static struct iscsit_transport cxgbit_transport = {
 static struct cxgb4_uld_info cxgbit_uld_info = {
     .name = DRV_NAME,
+    .nrxq = MAX_ULD_QSETS,
+    .rxq_size = 1024,
+    .lro = true,
     .add = cxgbit_uld_add,
     .state_change = cxgbit_uld_state_change,
     .lro_rx_handler = cxgbit_uld_lro_rx_handler,
...