Commit be4c9bad authored by Roland Dreier

MAINTAINERS: Add cxgb4 and iw_cxgb4 entries

Signed-off-by: Roland Dreier <rolandd@cisco.com>
Parent cfdda9d7
@@ -1719,6 +1719,20 @@ W: http://www.openfabrics.org
 S:	Supported
 F:	drivers/infiniband/hw/cxgb3/
 
+CXGB4 ETHERNET DRIVER (CXGB4)
+M:	Dimitris Michailidis <dm@chelsio.com>
+L:	netdev@vger.kernel.org
+W:	http://www.chelsio.com
+S:	Supported
+F:	drivers/net/cxgb4/
+
+CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
+M:	Steve Wise <swise@chelsio.com>
+L:	linux-rdma@vger.kernel.org
+W:	http://www.openfabrics.org
+S:	Supported
+F:	drivers/infiniband/hw/cxgb4/
+
 CYBERPRO FB DRIVER
 M:	Russell King <linux@arm.linux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
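For reference, the keys in the new entries above follow the legend at the top of the MAINTAINERS file:

	M: primary maintainer (name and email address)
	L: mailing list relevant to this area
	W: web page with status or further information
	S: status; "Supported" means someone is actually paid to look after this
	F: files and directories the entry applies to (a trailing slash matches the whole tree)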
...
@@ -61,6 +61,10 @@ static char *states[] = {
 	NULL,
 };
 
+int c4iw_max_read_depth = 8;
+module_param(c4iw_max_read_depth, int, 0644);
+MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+
 static int enable_tcp_timestamps;
 module_param(enable_tcp_timestamps, int, 0644);
 MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
@@ -113,18 +117,17 @@ static int snd_win = 32 * 1024;
 module_param(snd_win, int, 0644);
 MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
 
-static void process_work(struct work_struct *work);
 static struct workqueue_struct *workq;
-static DECLARE_WORK(skb_work, process_work);
 
 static struct sk_buff_head rxq;
-static c4iw_handler_func work_handlers[NUM_CPL_CMDS];
-c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
 
 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
 static void ep_timeout(unsigned long arg);
 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
 
+static LIST_HEAD(timeout_list);
+static spinlock_t timeout_lock;
+
 static void start_ep_timer(struct c4iw_ep *ep)
 {
 	PDBG("%s ep %p\n", __func__, ep);
@@ -271,26 +274,6 @@ static void release_ep_resources(struct c4iw_ep *ep)
 	c4iw_put_ep(&ep->com);
 }
 
-static void process_work(struct work_struct *work)
-{
-	struct sk_buff *skb = NULL;
-	struct c4iw_dev *dev;
-	struct cpl_act_establish *rpl = cplhdr(skb);
-	unsigned int opcode;
-	int ret;
-
-	while ((skb = skb_dequeue(&rxq))) {
-		rpl = cplhdr(skb);
-		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
-		opcode = rpl->ot.opcode;
-
-		BUG_ON(!work_handlers[opcode]);
-		ret = work_handlers[opcode](dev, skb);
-		if (!ret)
-			kfree_skb(skb);
-	}
-}
-
 static int status2errno(int status)
 {
 	switch (status) {
@@ -1795,76 +1778,6 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
 	return 0;
 }
 
-static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
-{
-	struct cpl_fw6_msg *rpl = cplhdr(skb);
-	struct c4iw_wr_wait *wr_waitp;
-	int ret;
-
-	PDBG("%s type %u\n", __func__, rpl->type);
-
-	switch (rpl->type) {
-	case 1:
-		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
-		wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
-		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
-		if (wr_waitp) {
-			wr_waitp->ret = ret;
-			wr_waitp->done = 1;
-			wake_up(&wr_waitp->wait);
-		}
-		break;
-	case 2:
-		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
-		break;
-	default:
-		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
-		       rpl->type);
-		break;
-	}
-	return 0;
-}
-
-static void ep_timeout(unsigned long arg)
-{
-	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
-	struct c4iw_qp_attributes attrs;
-	unsigned long flags;
-	int abort = 1;
-
-	spin_lock_irqsave(&ep->com.lock, flags);
-	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
-	     ep->com.state);
-	switch (ep->com.state) {
-	case MPA_REQ_SENT:
-		__state_set(&ep->com, ABORTING);
-		connect_reply_upcall(ep, -ETIMEDOUT);
-		break;
-	case MPA_REQ_WAIT:
-		__state_set(&ep->com, ABORTING);
-		break;
-	case CLOSING:
-	case MORIBUND:
-		if (ep->com.cm_id && ep->com.qp) {
-			attrs.next_state = C4IW_QP_STATE_ERROR;
-			c4iw_modify_qp(ep->com.qp->rhp,
-				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
-				     &attrs, 1);
-		}
-		__state_set(&ep->com, ABORTING);
-		break;
-	default:
-		printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
-			__func__, ep, ep->hwtid, ep->com.state);
-		WARN_ON(1);
-		abort = 0;
-	}
-	spin_unlock_irqrestore(&ep->com.lock, flags);
-	if (abort)
-		abort_connection(ep, NULL, GFP_ATOMIC);
-	c4iw_put_ep(&ep->com);
-}
-
 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 {
 	int err;
@@ -1904,8 +1817,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
 	BUG_ON(!qp);
 
-	if ((conn_param->ord > T4_MAX_READ_DEPTH) ||
-	    (conn_param->ird > T4_MAX_READ_DEPTH)) {
+	if ((conn_param->ord > c4iw_max_read_depth) ||
+	    (conn_param->ird > c4iw_max_read_depth)) {
 		abort_connection(ep, NULL, GFP_KERNEL);
 		err = -EINVAL;
 		goto err;
@@ -1968,6 +1881,11 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	struct net_device *pdev;
 	int step;
 
+	if ((conn_param->ord > c4iw_max_read_depth) ||
+	    (conn_param->ird > c4iw_max_read_depth)) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
 	if (!ep) {
 		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
@@ -2115,7 +2033,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	 */
 	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
 	if (ep->stid == -1) {
-		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
+		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
 		err = -ENOMEM;
 		goto fail2;
 	}
@@ -2243,6 +2161,116 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 	return ret;
 }
 
+/*
+ * These are the real handlers that are called from a
+ * work queue.
+ */
+static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH] = act_establish,
+	[CPL_ACT_OPEN_RPL] = act_open_rpl,
+	[CPL_RX_DATA] = rx_data,
+	[CPL_ABORT_RPL_RSS] = abort_rpl,
+	[CPL_ABORT_RPL] = abort_rpl,
+	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
+	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+	[CPL_PASS_ESTABLISH] = pass_establish,
+	[CPL_PEER_CLOSE] = peer_close,
+	[CPL_ABORT_REQ_RSS] = peer_abort,
+	[CPL_CLOSE_CON_RPL] = close_con_rpl,
+	[CPL_RDMA_TERMINATE] = terminate,
+	[CPL_FW4_ACK] = fw4_ack
+};
+
+static void process_timeout(struct c4iw_ep *ep)
+{
+	struct c4iw_qp_attributes attrs;
+	int abort = 1;
+
+	spin_lock_irq(&ep->com.lock);
+	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
+	     ep->com.state);
+	switch (ep->com.state) {
+	case MPA_REQ_SENT:
+		__state_set(&ep->com, ABORTING);
+		connect_reply_upcall(ep, -ETIMEDOUT);
+		break;
+	case MPA_REQ_WAIT:
+		__state_set(&ep->com, ABORTING);
+		break;
+	case CLOSING:
+	case MORIBUND:
+		if (ep->com.cm_id && ep->com.qp) {
+			attrs.next_state = C4IW_QP_STATE_ERROR;
+			c4iw_modify_qp(ep->com.qp->rhp,
+				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
+				     &attrs, 1);
+		}
+		__state_set(&ep->com, ABORTING);
+		break;
+	default:
+		printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
+			__func__, ep, ep->hwtid, ep->com.state);
+		WARN_ON(1);
+		abort = 0;
+	}
+	spin_unlock_irq(&ep->com.lock);
+	if (abort)
+		abort_connection(ep, NULL, GFP_KERNEL);
+	c4iw_put_ep(&ep->com);
+}
+
+static void process_timedout_eps(void)
+{
+	struct c4iw_ep *ep;
+
+	spin_lock_irq(&timeout_lock);
+	while (!list_empty(&timeout_list)) {
+		struct list_head *tmp;
+
+		tmp = timeout_list.next;
+		list_del(tmp);
+		spin_unlock_irq(&timeout_lock);
+		ep = list_entry(tmp, struct c4iw_ep, entry);
+		process_timeout(ep);
+		spin_lock_irq(&timeout_lock);
+	}
+	spin_unlock_irq(&timeout_lock);
+}
+
+static void process_work(struct work_struct *work)
+{
+	struct sk_buff *skb = NULL;
+	struct c4iw_dev *dev;
+	struct cpl_act_establish *rpl = cplhdr(skb);
+	unsigned int opcode;
+	int ret;
+
+	while ((skb = skb_dequeue(&rxq))) {
+		rpl = cplhdr(skb);
+		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
+		opcode = rpl->ot.opcode;
+
+		BUG_ON(!work_handlers[opcode]);
+		ret = work_handlers[opcode](dev, skb);
+		if (!ret)
+			kfree_skb(skb);
+	}
+	process_timedout_eps();
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
+static void ep_timeout(unsigned long arg)
+{
+	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+
+	spin_lock(&timeout_lock);
+	list_add_tail(&ep->entry, &timeout_list);
+	spin_unlock(&timeout_lock);
+	queue_work(workq, &skb_work);
+}
+
 /*
  * All the CM events are handled on a work queue to have a safe context.
  */
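The core of this hunk is the new timeout path: ep_timeout() fires from a kernel timer in softirq context, where sleeping is forbidden, so it now only links the endpoint onto timeout_list under timeout_lock and kicks the workqueue; process_timedout_eps() then runs in process context, unlinking each entry and dropping the lock around the potentially-blocking handler. A minimal userspace sketch of that hand-off pattern (pthread primitives standing in for the kernel timer, spinlock, and workqueue; every name here is illustrative, not from the driver):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct timeout_entry {
		int id;                         /* stands in for the c4iw_ep */
		struct timeout_entry *next;
	};

	/* The driver uses a FIFO list; a LIFO stack keeps this sketch short. */
	static struct timeout_entry *timeout_list;
	static pthread_mutex_t timeout_lock = PTHREAD_MUTEX_INITIALIZER;

	static void process_timeout(struct timeout_entry *e)
	{
		/* May block; runs with timeout_lock NOT held. */
		printf("tearing down connection %d\n", e->id);
		free(e);
	}

	static void process_timedout_eps(void)
	{
		pthread_mutex_lock(&timeout_lock);
		while (timeout_list) {
			struct timeout_entry *e = timeout_list;

			timeout_list = e->next;
			/* Unlink first, then drop the lock around the
			 * blocking handler, as the driver does. */
			pthread_mutex_unlock(&timeout_lock);
			process_timeout(e);
			pthread_mutex_lock(&timeout_lock);
		}
		pthread_mutex_unlock(&timeout_lock);
	}

	static void timer_fired(int id)   /* "atomic" context: just queue */
	{
		struct timeout_entry *e = malloc(sizeof(*e));

		if (!e)
			return;
		e->id = id;
		pthread_mutex_lock(&timeout_lock);
		e->next = timeout_list;
		timeout_list = e;
		pthread_mutex_unlock(&timeout_lock);
		/* in the driver: queue_work(workq, &skb_work); */
	}

	int main(void)
	{
		timer_fired(1);
		timer_fired(2);
		process_timedout_eps();   /* worker side */
		return 0;
	}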
@@ -2273,58 +2301,74 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	return 0;
 }
 
+static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+	struct cpl_fw6_msg *rpl = cplhdr(skb);
+	struct c4iw_wr_wait *wr_waitp;
+	int ret;
+
+	PDBG("%s type %u\n", __func__, rpl->type);
+
+	switch (rpl->type) {
+	case 1:
+		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
+		wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
+		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
+		if (wr_waitp) {
+			wr_waitp->ret = ret;
+			wr_waitp->done = 1;
+			wake_up(&wr_waitp->wait);
+		}
+		break;
+	case 2:
+		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+		break;
+	default:
+		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
+		       rpl->type);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Most upcalls from the T4 Core go to sched() to
+ * schedule the processing on a work queue.
+ */
+c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH] = sched,
+	[CPL_ACT_OPEN_RPL] = sched,
+	[CPL_RX_DATA] = sched,
+	[CPL_ABORT_RPL_RSS] = sched,
+	[CPL_ABORT_RPL] = sched,
+	[CPL_PASS_OPEN_RPL] = sched,
+	[CPL_CLOSE_LISTSRV_RPL] = sched,
+	[CPL_PASS_ACCEPT_REQ] = sched,
+	[CPL_PASS_ESTABLISH] = sched,
+	[CPL_PEER_CLOSE] = sched,
+	[CPL_CLOSE_CON_RPL] = sched,
+	[CPL_ABORT_REQ_RSS] = sched,
+	[CPL_RDMA_TERMINATE] = sched,
+	[CPL_FW4_ACK] = sched,
+	[CPL_SET_TCB_RPL] = set_tcb_rpl,
+	[CPL_FW6_MSG] = fw6_msg
+};
+
 int __init c4iw_cm_init(void)
 {
+	spin_lock_init(&timeout_lock);
 	skb_queue_head_init(&rxq);
 
 	workq = create_singlethread_workqueue("iw_cxgb4");
 	if (!workq)
 		return -ENOMEM;
 
-	/*
-	 * Most upcalls from the T4 Core go to sched() to
-	 * schedule the processing on a work queue.
-	 */
-	c4iw_handlers[CPL_ACT_ESTABLISH] = sched;
-	c4iw_handlers[CPL_ACT_OPEN_RPL] = sched;
-	c4iw_handlers[CPL_RX_DATA] = sched;
-	c4iw_handlers[CPL_ABORT_RPL_RSS] = sched;
-	c4iw_handlers[CPL_ABORT_RPL] = sched;
-	c4iw_handlers[CPL_PASS_OPEN_RPL] = sched;
-	c4iw_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
-	c4iw_handlers[CPL_PASS_ACCEPT_REQ] = sched;
-	c4iw_handlers[CPL_PASS_ESTABLISH] = sched;
-	c4iw_handlers[CPL_PEER_CLOSE] = sched;
-	c4iw_handlers[CPL_CLOSE_CON_RPL] = sched;
-	c4iw_handlers[CPL_ABORT_REQ_RSS] = sched;
-	c4iw_handlers[CPL_RDMA_TERMINATE] = sched;
-	c4iw_handlers[CPL_FW4_ACK] = sched;
-	c4iw_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
-	c4iw_handlers[CPL_FW6_MSG] = fw6_msg;
-
-	/*
-	 * These are the real handlers that are called from a
-	 * work queue.
-	 */
-	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
-	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
-	work_handlers[CPL_RX_DATA] = rx_data;
-	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
-	work_handlers[CPL_ABORT_RPL] = abort_rpl;
-	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
-	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
-	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
-	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
-	work_handlers[CPL_PEER_CLOSE] = peer_close;
-	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
-	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
-	work_handlers[CPL_RDMA_TERMINATE] = terminate;
-	work_handlers[CPL_FW4_ACK] = fw4_ack;
-
 	return 0;
 }
 
 void __exit c4iw_cm_term(void)
 {
+	WARN_ON(!list_empty(&timeout_list));
 	flush_workqueue(workq);
 	destroy_workqueue(workq);
 }
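Note how both dispatch tables moved from element-by-element assignment inside c4iw_cm_init() to C99 designated array initializers at file scope: work_handlers can become static, and both tables are fully populated before any CPL message can arrive. A self-contained sketch of the idiom (the opcode and handler names here are made up, not the CPL ones):

	#include <stdio.h>

	enum { OP_OPEN, OP_DATA, OP_CLOSE, NUM_OPS };

	typedef int (*handler_func)(int arg);

	static int do_open(int arg)  { printf("open %d\n", arg);  return 0; }
	static int do_data(int arg)  { printf("data %d\n", arg);  return 0; }
	static int do_close(int arg) { printf("close %d\n", arg); return 0; }

	/* C99 designated initializers: entries may appear in any order,
	 * and unnamed slots are zeroed, so a NULL check still catches
	 * unhandled opcodes, like BUG_ON(!work_handlers[opcode]) above. */
	static handler_func handlers[NUM_OPS] = {
		[OP_CLOSE] = do_close,
		[OP_OPEN]  = do_open,
		[OP_DATA]  = do_data,
	};

	int main(void)
	{
		for (int op = 0; op < NUM_OPS; op++)
			if (handlers[op])
				handlers[op](op);
		return 0;
	}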
@@ -51,8 +51,8 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
 		return;
 	}
 
-	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
-	       "type %d wrid.hi 0x%x wrid.lo 0x%x\n", __func__,
+	printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
+	       "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
 	       CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
 	       CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
 	       CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
@@ -60,7 +60,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
 	if (qhp->attr.state == C4IW_QP_STATE_RTS) {
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
 		c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
-			       &attrs, 0);
+			       &attrs, 1);
 	}
 	event.event = ib_event;
...
@@ -597,6 +597,7 @@ struct c4iw_ep {
 	struct c4iw_ep_common com;
 	struct c4iw_ep *parent_ep;
 	struct timer_list timer;
+	struct list_head entry;
 	unsigned int atid;
 	u32 hwtid;
 	u32 snd_seq;
@@ -739,5 +740,6 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
 extern struct cxgb4_client t4c_client;
 extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
+extern int c4iw_max_read_depth;
 #endif
@@ -267,8 +267,8 @@ static int c4iw_query_device(struct ib_device *ibdev,
 	props->max_qp_wr = T4_MAX_QP_DEPTH;
 	props->max_sge = T4_MAX_RECV_SGE;
 	props->max_sge_rd = 1;
-	props->max_qp_rd_atom = T4_MAX_READ_DEPTH;
-	props->max_qp_init_rd_atom = T4_MAX_READ_DEPTH;
+	props->max_qp_rd_atom = c4iw_max_read_depth;
+	props->max_qp_init_rd_atom = c4iw_max_read_depth;
 	props->max_cq = T4_MAX_NUM_CQ;
 	props->max_cqe = T4_MAX_CQ_DEPTH;
 	props->max_mr = c4iw_num_stags(&dev->rdev);
...
@@ -856,7 +856,8 @@ int c4iw_post_zb_read(struct c4iw_qp *qhp)
 	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
 }
 
-int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
+static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
+			   gfp_t gfp)
 {
 	struct fw_ri_wr *wqe;
 	struct sk_buff *skb;
@@ -865,9 +866,9 @@ int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
 	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
 	     qhp->ep->hwtid);
 
-	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(sizeof *wqe, gfp);
 	if (!skb)
-		return -ENOMEM;
+		return;
 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
 
 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
@@ -881,7 +882,7 @@ int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
 	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
 	term = (struct terminate_message *)wqe->u.terminate.termmsg;
 	build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
-	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
+	c4iw_ofld_send(&qhp->rhp->rdev, skb);
 }
 
 /*
@@ -1130,14 +1131,14 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
 		newattr.enable_bind = attrs->enable_bind;
 	if (mask & C4IW_QP_ATTR_MAX_ORD) {
-		if (attrs->max_ord > T4_MAX_READ_DEPTH) {
+		if (attrs->max_ord > c4iw_max_read_depth) {
 			ret = -EINVAL;
 			goto out;
 		}
 		newattr.max_ord = attrs->max_ord;
 	}
 	if (mask & C4IW_QP_ATTR_MAX_IRD) {
-		if (attrs->max_ird > T4_MAX_READ_DEPTH) {
+		if (attrs->max_ird > c4iw_max_read_depth) {
 			ret = -EINVAL;
 			goto out;
 		}
@@ -1215,12 +1216,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			qhp->attr.state = C4IW_QP_STATE_TERMINATE;
 			if (qhp->ibqp.uobject)
 				t4_set_wq_in_error(&qhp->wq);
-			if (!internal) {
-				ep = qhp->ep;
-				c4iw_get_ep(&ep->com);
-				terminate = 1;
-				disconnect = 1;
-			}
+			ep = qhp->ep;
+			c4iw_get_ep(&ep->com);
+			terminate = 1;
+			disconnect = 1;
 			break;
 		case C4IW_QP_STATE_ERROR:
 			qhp->attr.state = C4IW_QP_STATE_ERROR;
@@ -1301,7 +1300,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	spin_unlock_irqrestore(&qhp->lock, flag);
 
 	if (terminate)
-		c4iw_post_terminate(qhp, NULL);
+		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
 
 	/*
 	 * If disconnect is 1, then we need to initiate a disconnect
@@ -1309,7 +1308,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	 * an abnormal close (RTS/CLOSING->ERROR).
 	 */
 	if (disconnect) {
-		c4iw_ep_disconnect(ep, abort, GFP_KERNEL);
+		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
+				   GFP_KERNEL);
 		c4iw_put_ep(&ep->com);
 	}
...
@@ -36,7 +36,6 @@
 #include "t4_msg.h"
 #include "t4fw_ri_api.h"
 
-#define T4_MAX_READ_DEPTH 16
 #define T4_QID_BASE 1024
 #define T4_MAX_QIDS 256
 #define T4_MAX_NUM_QP (1<<16)
@@ -450,11 +449,25 @@ struct t4_cq {
 static inline int t4_arm_cq(struct t4_cq *cq, int se)
 {
 	u32 val;
+	u16 inc;
 
-	val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
-	      INGRESSQID(cq->cqid);
-	cq->cidx_inc = 0;
-	writel(val, cq->gts);
+	do {
+		/*
+		 * inc must be less than both the max update value -and-
+		 * the size of the CQ.
+		 */
+		inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc :
+		      CIDXINC_MASK;
+		inc = inc <= (cq->size - 1) ? inc : (cq->size - 1);
+		if (inc == cq->cidx_inc)
+			val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) |
+			      INGRESSQID(cq->cqid);
+		else
+			val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) |
+			      INGRESSQID(cq->cqid);
+		cq->cidx_inc -= inc;
+		writel(val, cq->gts);
+	} while (cq->cidx_inc);
 	return 0;
 }
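The rewritten t4_arm_cq() handles the fact that the CIDXINC field of the GTS doorbell can only encode a bounded increment, so a large accumulated cq->cidx_inc must be returned in chunks no larger than CIDXINC_MASK and no larger than the CQ itself; only the final write arms the CQ with the caller's se and timer index, while earlier writes merely return credits. A small standalone sketch of the clamping loop (the field width and names below are invented for illustration):

	#include <stdio.h>

	#define CIDXINC_MASK 0x7ff          /* illustrative 11-bit field */

	/* Return 'pending' index credits in chunks that fit both the
	 * register field and the queue size; mirrors the do/while in
	 * t4_arm_cq(), with the MMIO write replaced by printf. */
	static void return_credits(unsigned int pending, unsigned int qsize)
	{
		do {
			unsigned int inc = pending;

			if (inc > CIDXINC_MASK)
				inc = CIDXINC_MASK;
			if (inc > qsize - 1)
				inc = qsize - 1;
			pending -= inc;
			/* Final chunk arms the CQ; earlier ones only
			 * hand back credits. */
			printf("write CIDXINC=%u %s\n", inc,
			       pending ? "(credits only)" : "(arm)");
		} while (pending);
	}

	int main(void)
	{
		return_credits(5000, 1024);   /* needs multiple writes */
		return 0;
	}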
@@ -489,11 +502,12 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 {
 	int ret = 0;
+	u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts);
 
-	if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
+	if (G_CQE_GENBIT(bits_type_ts) == cq->gen) {
 		*cqe = &cq->queue[cq->cidx];
-		cq->timestamp = CQE_TS(*cqe);
-	} else if (CQE_TS(&cq->queue[cq->cidx]) > cq->timestamp)
+		cq->timestamp = G_CQE_TS(bits_type_ts);
+	} else if (G_CQE_TS(bits_type_ts) > cq->timestamp)
 		ret = -EOVERFLOW;
 	else
 		ret = -ENODATA;
...
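This last hunk makes t4_next_hw_cqe() snapshot the DMA-written bits_type_ts word once and derive both the generation-bit test and the timestamp from that single read, instead of re-reading the CQE while the hardware may be overwriting it. A toy ring-buffer consumer using the same gen-bit ownership check (the bit layout here is invented, not the T4 one):

	#include <stdint.h>
	#include <stdio.h>

	/* Toy CQE: bit 63 is the generation bit, the low bits a timestamp.
	 * The layout is illustrative only. */
	struct cqe { volatile uint64_t bits_type_ts; };

	#define GENBIT(x) ((unsigned int)((x) >> 63))
	#define TS(x)     ((x) & 0xffffffffffffULL)

	struct cq {
		struct cqe queue[8];
		unsigned int cidx;
		unsigned int gen;      /* expected gen bit; flips per wrap */
		uint64_t timestamp;
	};

	/* Returns 0 and sets *out when the entry is owned by software.
	 * Reads bits_type_ts exactly once, like the patched code. */
	static int next_hw_cqe(struct cq *cq, struct cqe **out)
	{
		uint64_t bits = cq->queue[cq->cidx].bits_type_ts;

		if (GENBIT(bits) == cq->gen) {
			*out = &cq->queue[cq->cidx];
			cq->timestamp = TS(bits);
			return 0;
		}
		return TS(bits) > cq->timestamp ? -1 /* overflow */
						: -2 /* no data */;
	}

	int main(void)
	{
		struct cq cq = { .gen = 1 };
		struct cqe *e;

		cq.queue[0].bits_type_ts = (1ULL << 63) | 42; /* produced */
		if (!next_hw_cqe(&cq, &e))
			printf("got cqe, ts=%llu\n",
			       (unsigned long long)TS(e->bits_type_ts));
		return 0;
	}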