diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b818bd6d1fb5f5c1e20ef86a7e88c9f835ca526b..22bccd87c5d29c748e5467fab133c451739c509e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -637,21 +637,16 @@ static int send_flowc(struct c4iw_ep *ep)
 
 static int send_halfclose(struct c4iw_ep *ep)
 {
-	struct cpl_close_con_req *req;
 	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
-	int wrlen = roundup(sizeof *req, 16);
+	u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	if (WARN_ON(!skb))
 		return -ENOMEM;
 
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
-	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
-	memset(req, 0, wrlen);
-	INIT_TP_WR(req, ep->hwtid);
-	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
-						    ep->hwtid));
+	cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
+			      NULL, arp_failure_discard);
+
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
index fbb973e9ec29e921b1a2c0ff0ae195c3a7b72330..e77661d98738b5d6cccdab62ff25aefb3eeaeecd 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
@@ -38,6 +38,7 @@
 
 #include <cxgb4.h>
 #include <t4_msg.h>
+#include <l2t.h>
 
 void
 cxgb_get_4tuple(struct cpl_pass_accept_req *, enum chip_type,
@@ -96,4 +97,19 @@ cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
 }
+
+static inline void
+cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+		      void *handle, arp_err_handler_t handler)
+{
+	struct cpl_close_con_req *req;
+
+	req = (struct cpl_close_con_req *)__skb_put(skb, len);
+	memset(req, 0, len);
+
+	INIT_TP_WR(req, tid);
+	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+	set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
+	t4_set_arp_err_handler(skb, handle, handler);
+}
 #endif
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 994058f0c4e01b065ace85b7e5d2282ee60b3155..a8f5f360414f0c6e5c5b33cb25914fddfac458a5 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -615,21 +615,14 @@ void cxgbit_free_np(struct iscsi_np *np)
 static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
 {
 	struct sk_buff *skb;
-	struct cpl_close_con_req *req;
-	unsigned int len = roundup(sizeof(struct cpl_close_con_req), 16);
+	u32 len = roundup(sizeof(struct cpl_close_con_req), 16);
 
 	skb = alloc_skb(len, GFP_ATOMIC);
 	if (!skb)
 		return;
 
-	req = (struct cpl_close_con_req *)__skb_put(skb, len);
-	memset(req, 0, len);
-
-	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
-	INIT_TP_WR(req, csk->tid);
-	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
-						    csk->tid));
-	req->rsvd = 0;
+	cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
+			      NULL, NULL);
 
 	cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
 	__skb_queue_tail(&csk->txq, skb);
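
For context, a minimal sketch of how a caller is expected to use the new helper, mirroring the cxgbit hunk above: allocate an skb sized to the rounded-up CPL length, let cxgb_mk_close_con_req() zero the request, stamp the TID/opcode, select the data TX queue and install the ARP error handler, then queue the skb. The struct example_sock and example_send_halfclose names are illustrative only and not part of this patch; the real callers are c4iw_ep in iw_cxgb4 and cxgbit_sock in cxgbit.

/* Illustrative only -- not part of this patch. Assumes the cxgb4/libcxgb
 * headers (t4_msg.h, l2t.h, libcxgb_cm.h) are on the include path, as they
 * are for the in-tree users.
 */
#include <linux/skbuff.h>
#include "libcxgb_cm.h"

/* Hypothetical per-connection state; real drivers use c4iw_ep / cxgbit_sock. */
struct example_sock {
	u32 tid;			/* hardware connection tid */
	u16 txq_idx;			/* egress queue for CPL_PRIORITY_DATA */
	struct sk_buff_head txq;	/* driver-owned transmit queue */
};

static void example_send_halfclose(struct example_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_close_con_req), 16);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	/* Build CPL_CLOSE_CON_REQ in the skb; no ARP failure handler here,
	 * whereas iw_cxgb4 passes arp_failure_discard in its hunk above.
	 */
	cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx, NULL, NULL);

	__skb_queue_tail(&csk->txq, skb);
}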