提交 2fd36865 编写于 作者: M Mike Marciniszyn 提交者: Greg Kroah-Hartman

staging/rdma/hfi1: add common routine for queuing acks

This patch is a preliminary patch required to
coalesce acks.

The routine to "schedule" a QP for sending a NAK is
now centralized in rc_defered_ack().  The flag is changed
for clarity since all acks will potentially use
the deferral mechanism.
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
上级 46b010d3
......@@ -714,8 +714,8 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet)
*/
list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
list_del_init(&qp->rspwait);
if (qp->r_flags & HFI1_R_RSP_NAK) {
qp->r_flags &= ~HFI1_R_RSP_NAK;
if (qp->r_flags & HFI1_R_RSP_DEFERED_ACK) {
qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK;
hfi1_send_rc_ack(rcd, qp, 0);
}
if (qp->r_flags & HFI1_R_RSP_SEND) {
......
......@@ -1608,6 +1608,16 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
return;
}
/*
 * rc_defered_ack - queue a QP so its ack is sent later, at the end of
 * the interrupt session.
 *
 * If the QP is not already waiting on the context's list, mark it with
 * HFI1_R_RSP_DEFERED_ACK, take a reference on the QP, and append it to
 * rcd->qp_wait_list for deferred processing.
 */
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct hfi1_qp *qp)
{
	/* Already queued for a deferred response; nothing to do. */
	if (!list_empty(&qp->rspwait))
		return;

	qp->r_flags |= HFI1_R_RSP_DEFERED_ACK;
	/* Hold a reference while the QP sits on the wait list. */
	atomic_inc(&qp->refcount);
	list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
/**
* rc_rcv_error - process an incoming duplicate or error RC packet
* @ohdr: the other headers for this packet
......@@ -1650,11 +1660,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
* in the receive queue have been processed.
* Otherwise, we end up propagating congestion.
*/
if (list_empty(&qp->rspwait)) {
qp->r_flags |= HFI1_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
rc_defered_ack(rcd, qp);
}
goto done;
}
......@@ -2337,11 +2343,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
qp->r_ack_psn = qp->r_psn;
/* Queue RNR NAK for later */
if (list_empty(&qp->rspwait)) {
qp->r_flags |= HFI1_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
rc_defered_ack(rcd, qp);
return;
nack_op_err:
......@@ -2349,11 +2351,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */
if (list_empty(&qp->rspwait)) {
qp->r_flags |= HFI1_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
rc_defered_ack(rcd, qp);
return;
nack_inv_unlck:
......@@ -2363,11 +2361,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
qp->r_nak_state = IB_NAK_INVALID_REQUEST;
qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */
if (list_empty(&qp->rspwait)) {
qp->r_flags |= HFI1_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
rc_defered_ack(rcd, qp);
return;
nack_acc_unlck:
......@@ -2421,13 +2415,7 @@ void hfi1_rc_hdrerr(
* Otherwise, we end up
* propagating congestion.
*/
if (list_empty(&qp->rspwait)) {
qp->r_flags |= HFI1_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(
&qp->rspwait,
&rcd->qp_wait_list);
}
rc_defered_ack(rcd, qp);
} /* Out of sequence NAK */
} /* QP Request NAKs */
}
......@@ -553,11 +553,13 @@ struct hfi1_qp {
/*
* Bit definitions for r_flags.
*/
#define HFI1_R_REUSE_SGE 0x01
#define HFI1_R_RDMAR_SEQ 0x02
#define HFI1_R_RSP_NAK 0x04
#define HFI1_R_RSP_SEND 0x08
#define HFI1_R_COMM_EST 0x10
#define HFI1_R_REUSE_SGE 0x01
#define HFI1_R_RDMAR_SEQ 0x02
/* defer ack until end of interrupt session */
#define HFI1_R_RSP_DEFERED_ACK 0x04
/* relay ack to send engine */
#define HFI1_R_RSP_SEND 0x08
#define HFI1_R_COMM_EST 0x10
/*
* Bit definitions for s_flags.
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册