Commit 98e891b5 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Remove qp->req.state

The rxe driver has four different QP state variables,
    qp->attr.qp_state,
    qp->req.state,
    qp->comp.state, and
    qp->resp.state.
All of these basically carry the same information.

This patch replaces uses of qp->req.state by qp->attr.qp_state and enum
ib_qp_state. This is the third of three patches which will remove all
but the qp->attr.qp_state variable. This will bring the driver closer to
the IBA description.

Link: https://lore.kernel.org/r/20230405042611.6467-3-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Parent f55efc2e
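For context when reading the diff: the converted checks go through a qp_state() accessor and rely on the numeric ordering of enum ib_qp_state. A minimal sketch of what the diff assumes (the exact helper lives in the rxe headers and is not shown in this patch):

/* Minimal sketch of what the converted checks assume; the exact helper
 * is defined in the rxe headers and is not part of this patch.
 *
 * enum ib_qp_state orders the states as
 *   IB_QPS_RESET < IB_QPS_INIT < IB_QPS_RTR < IB_QPS_RTS <
 *   IB_QPS_SQD < IB_QPS_SQE < IB_QPS_ERR
 * which is why ordered comparisons such as qp_state(qp) < IB_QPS_RTS
 * can replace the old QP_STATE_READY checks.
 */
static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
	return qp->attr.qp_state;
}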
@@ -491,12 +491,11 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
 		}
 	}
 
-	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
+	if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
 		/* state_lock used by requester & completer */
 		spin_lock_bh(&qp->state_lock);
-		if ((qp->req.state == QP_STATE_DRAIN) &&
-		    (qp->comp.psn == qp->req.psn)) {
-			qp->req.state = QP_STATE_DRAINED;
+		if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
+			qp->attr.sq_draining = 0;
 			spin_unlock_bh(&qp->state_lock);
 
 			if (qp->ibqp.event_handler) {
@@ -723,7 +722,7 @@ int rxe_completer(struct rxe_qp *qp)
 	 * (4) the timeout parameter is set
 	 */
 	if ((qp_type(qp) == IB_QPT_RC) &&
-	    (qp->req.state == QP_STATE_READY) &&
+	    (qp_state(qp) >= IB_QPS_RTS) &&
 	    (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
 	    qp->qp_timeout_jiffies)
 		mod_timer(&qp->retrans_timer,
......
@@ -413,8 +413,8 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 	int is_request = pkt->mask & RXE_REQ_MASK;
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 
-	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
-	    (!is_request && (qp_state(qp) <= IB_QPS_RTR))) {
+	if ((is_request && (qp_state(qp) < IB_QPS_RTS)) ||
+	    (!is_request && (qp_state(qp) < IB_QPS_RTR))) {
 		rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
 		goto drop;
 	}
......
@@ -231,7 +231,6 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
 					       QUEUE_TYPE_FROM_CLIENT);
 
-	qp->req.state = QP_STATE_RESET;
 	qp->req.opcode = -1;
 	qp->comp.opcode = -1;
@@ -394,12 +393,9 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
 		goto err1;
 	}
 
-	if (mask & IB_QP_STATE) {
-		if (cur_state == IB_QPS_SQD) {
-			if (qp->req.state == QP_STATE_DRAIN &&
-			    new_state != IB_QPS_ERR)
-				goto err1;
-		}
+	if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD) {
+		if (qp->attr.sq_draining && new_state != IB_QPS_ERR)
+			goto err1;
 	}
 
 	if (mask & IB_QP_PORT) {
@@ -474,9 +470,6 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 	rxe_disable_task(&qp->comp.task);
 	rxe_disable_task(&qp->req.task);
 
-	/* move qp to the reset state */
-	qp->req.state = QP_STATE_RESET;
-
 	/* drain work and packet queuesc */
 	rxe_requester(qp);
 	rxe_completer(qp);
@@ -512,22 +505,9 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 		rxe_enable_task(&qp->req.task);
 }
 
-/* drain the send queue */
-static void rxe_qp_drain(struct rxe_qp *qp)
-{
-	if (qp->sq.queue) {
-		if (qp->req.state != QP_STATE_DRAINED) {
-			qp->req.state = QP_STATE_DRAIN;
-			rxe_sched_task(&qp->comp.task);
-			rxe_sched_task(&qp->req.task);
-		}
-	}
-}
-
 /* move the qp to the error state */
 void rxe_qp_error(struct rxe_qp *qp)
 {
-	qp->req.state = QP_STATE_ERROR;
 	qp->attr.qp_state = IB_QPS_ERR;
 
 	/* drain work and packet queues */
@@ -540,6 +520,8 @@ void rxe_qp_error(struct rxe_qp *qp)
 int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 		     struct ib_udata *udata)
 {
+	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
+				      attr->cur_qp_state : qp->attr.qp_state;
 	int err;
 
 	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
@@ -656,7 +638,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 	case IB_QPS_INIT:
 		rxe_dbg_qp(qp, "state -> INIT\n");
-		qp->req.state = QP_STATE_INIT;
 		break;
 
 	case IB_QPS_RTR:
@@ -665,12 +646,15 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 	case IB_QPS_RTS:
 		rxe_dbg_qp(qp, "state -> RTS\n");
-		qp->req.state = QP_STATE_READY;
 		break;
 
 	case IB_QPS_SQD:
 		rxe_dbg_qp(qp, "state -> SQD\n");
-		rxe_qp_drain(qp);
+		if (cur_state != IB_QPS_SQD) {
+			qp->attr.sq_draining = 1;
+			rxe_sched_task(&qp->comp.task);
+			rxe_sched_task(&qp->req.task);
+		}
 		break;
 
 	case IB_QPS_SQE:
@@ -708,16 +692,11 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
 	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
 	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);
 
-	if (qp->req.state == QP_STATE_DRAIN) {
-		attr->sq_draining = 1;
-		/* applications that get this state
-		 * typically spin on it. yield the
-		 * processor
-		 */
+	/* Applications that get this state typically spin on it.
+	 * Yield the processor
+	 */
+	if (qp->attr.sq_draining)
 		cond_resched();
-	} else {
-		attr->sq_draining = 0;
-	}
 
 	rxe_dbg_qp(qp, "attr->sq_draining = %d\n", attr->sq_draining);
......
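The cond_resched() above covers applications that poll the drain status reported here. Purely as an illustration, not part of this patch and assuming the standard libibverbs calls, such a caller might drain a send queue roughly like this:

#include <infiniband/verbs.h>

/* Illustrative only: move a QP to SQD and wait for the send queue to
 * drain by polling the sq_draining flag that rxe_qp_to_attr() reports.
 */
static int wait_for_sq_drain(struct ibv_qp *qp)
{
	struct ibv_qp_attr attr = { .qp_state = IBV_QPS_SQD };
	struct ibv_qp_init_attr init_attr;

	if (ibv_modify_qp(qp, &attr, IBV_QP_STATE))
		return -1;

	do {
		if (ibv_query_qp(qp, &attr, IBV_QP_STATE, &init_attr))
			return -1;
	} while (attr.sq_draining);	/* cleared once comp.psn == req.psn */

	return 0;
}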
@@ -39,11 +39,12 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
 	}
 
 	if (pkt->mask & RXE_REQ_MASK) {
-		if (unlikely(qp_state(qp) <= IB_QPS_RTR))
+		if (unlikely(qp_state(qp) < IB_QPS_RTR))
 			return -EINVAL;
-	} else if (unlikely(qp->req.state < QP_STATE_READY ||
-				qp->req.state > QP_STATE_DRAINED))
-		return -EINVAL;
+	} else {
+		if (unlikely(qp_state(qp) < IB_QPS_RTS))
+			return -EINVAL;
+	}
 
 	return 0;
 }
......
@@ -120,13 +120,13 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
 	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
 
-	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
+	if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
 		/* check to see if we are drained;
 		 * state_lock used by requester and completer
 		 */
 		spin_lock_bh(&qp->state_lock);
 		do {
-			if (qp->req.state != QP_STATE_DRAIN) {
+			if (!qp->attr.sq_draining) {
 				/* comp just finished */
 				spin_unlock_bh(&qp->state_lock);
 				break;
@@ -139,7 +139,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 				break;
 			}
 
-			qp->req.state = QP_STATE_DRAINED;
+			qp->attr.sq_draining = 0;
 			spin_unlock_bh(&qp->state_lock);
 
 			if (qp->ibqp.event_handler) {
@@ -159,8 +159,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 	wqe = queue_addr_from_index(q, index);
 
-	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
-		      qp->req.state == QP_STATE_DRAINED) &&
+	if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
 		     (wqe->state != wqe_state_processing)))
 		return NULL;
@@ -656,7 +655,7 @@ int rxe_requester(struct rxe_qp *qp)
 	if (unlikely(!qp->valid))
 		goto exit;
 
-	if (unlikely(qp->req.state == QP_STATE_ERROR)) {
+	if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
 		wqe = req_next_wqe(qp);
 		if (wqe)
 			/*
@@ -667,7 +666,7 @@ int rxe_requester(struct rxe_qp *qp)
 			goto exit;
 	}
 
-	if (unlikely(qp->req.state == QP_STATE_RESET)) {
+	if (unlikely(qp_state(qp) == IB_QPS_RESET)) {
 		qp->req.wqe_index = queue_get_consumer(q,
 						QUEUE_TYPE_FROM_CLIENT);
 		qp->req.opcode = -1;
@@ -836,7 +835,7 @@ int rxe_requester(struct rxe_qp *qp)
 	/* update wqe_index for each wqe completion */
 	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
 	wqe->state = wqe_state_error;
-	qp->req.state = QP_STATE_ERROR;
+	qp->attr.qp_state = IB_QPS_ERR;
 	rxe_sched_task(&qp->comp.task);
 exit:
 	ret = -EAGAIN;
......
@@ -881,7 +881,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
 	if (!err)
 		rxe_sched_task(&qp->req.task);
 
-	if (unlikely(qp->req.state == QP_STATE_ERROR))
+	if (unlikely(qp_state(qp) == IB_QPS_ERR))
 		rxe_sched_task(&qp->comp.task);
 
 	return err;
@@ -900,7 +900,7 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 		goto err_out;
 	}
 
-	if (unlikely(qp->req.state < QP_STATE_READY)) {
+	if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
 		*bad_wr = wr;
 		err = -EINVAL;
 		rxe_dbg_qp(qp, "qp not ready to send");
......
@@ -102,17 +102,7 @@ struct rxe_srq {
 	int			error;
 };
 
-enum rxe_qp_state {
-	QP_STATE_RESET,
-	QP_STATE_INIT,
-	QP_STATE_READY,
-	QP_STATE_DRAIN,		/* req only */
-	QP_STATE_DRAINED,	/* req only */
-	QP_STATE_ERROR
-};
-
 struct rxe_req_info {
-	enum rxe_qp_state	state;
 	int			wqe_index;
 	u32			psn;
 	int			opcode;
......