Commit 8e3b6883, authored by Leon Romanovsky, committed by Jason Gunthorpe

RDMA/mlx5: Delete unreachable handle_atomic code by simplifying SW completion

The handle_atomic code has been left unimplemented since 2013; remove it until
that functionality is actually developed.

Remove the dead code by simplifying the SW completion logic, which is supposed
to be the same for the send and receive paths.

Fixes: e126ba97 ("mlx5: Add driver for Mellanox Connect-IB adapters")
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Tested-by: Stephen Rothwell <sfr@canb.auug.org.au> # compile tested
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 4785860e
@@ -330,67 +330,6 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
 	dump_cqe(dev, cqe);
 }
 
-static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
-{
-	/* TBD: waiting decision
-	 */
-	return 0;
-}
-
-static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
-{
-	struct mlx5_wqe_data_seg *dpseg;
-	void *addr;
-
-	dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
-		sizeof(struct mlx5_wqe_raddr_seg) +
-		sizeof(struct mlx5_wqe_atomic_seg);
-	addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
-
-	return addr;
-}
-
-static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
-			  uint16_t idx)
-{
-	void *addr;
-	int byte_count;
-	int i;
-
-	if (!is_atomic_response(qp, idx))
-		return;
-
-	byte_count = be32_to_cpu(cqe64->byte_cnt);
-	addr = mlx5_get_atomic_laddr(qp, idx);
-
-	if (byte_count == 4) {
-		*(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
-	} else {
-		for (i = 0; i < byte_count; i += 8) {
-			*(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
-			addr += 8;
-		}
-	}
-
-	return;
-}
-
-static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
-			   u16 tail, u16 head)
-{
-	u16 idx;
-
-	do {
-		idx = tail & (qp->sq.wqe_cnt - 1);
-		handle_atomic(qp, cqe64, idx);
-		if (idx == head)
-			break;
-
-		tail = qp->sq.w_list[idx].next;
-	} while (1);
-	tail = qp->sq.w_list[idx].next;
-	qp->sq.last_poll = tail;
-}
-
 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
 	mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
@@ -428,45 +367,15 @@ static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
 	item->key = be32_to_cpu(cqe->mkey);
 }
 
-static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
-			 struct ib_wc *wc, int *npolled)
-{
-	struct mlx5_ib_wq *wq;
-	unsigned int cur;
-	unsigned int idx;
-	int np;
-	int i;
-
-	wq = &qp->sq;
-	cur = wq->head - wq->tail;
-	np = *npolled;
-
-	if (cur == 0)
-		return;
-
-	for (i = 0; i < cur && np < num_entries; i++) {
-		idx = wq->last_poll & (wq->wqe_cnt - 1);
-		wc->wr_id = wq->wrid[idx];
-		wc->status = IB_WC_WR_FLUSH_ERR;
-		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
-		wq->tail++;
-		np++;
-		wc->qp = &qp->ibqp;
-		wc++;
-		wq->last_poll = wq->w_list[idx].next;
-	}
-	*npolled = np;
-}
-
-static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
-			 struct ib_wc *wc, int *npolled)
+static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
+		    int *npolled, int is_send)
 {
 	struct mlx5_ib_wq *wq;
 	unsigned int cur;
 	int np;
 	int i;
 
-	wq = &qp->rq;
+	wq = (is_send) ? &qp->sq : &qp->rq;
 	cur = wq->head - wq->tail;
 	np = *npolled;
@@ -493,13 +402,13 @@ static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
 	*npolled = 0;
 
 	/* Find uncompleted WQEs belonging to that cq and return mmics ones */
 	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
-		sw_send_comp(qp, num_entries, wc + *npolled, npolled);
+		sw_comp(qp, num_entries, wc + *npolled, npolled, true);
 		if (*npolled >= num_entries)
 			return;
 	}
 
 	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
-		sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
+		sw_comp(qp, num_entries, wc + *npolled, npolled, false);
 		if (*npolled >= num_entries)
 			return;
 	}
@@ -567,7 +476,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
 		idx = wqe_ctr & (wq->wqe_cnt - 1);
 		handle_good_req(wc, cqe64, wq, idx);
-		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
 		wc->wr_id = wq->wrid[idx];
 		wq->tail = wq->wqe_head[idx] + 1;
 		wc->status = IB_WC_SUCCESS;
@@ -275,7 +275,6 @@ struct mlx5_ib_wq {
 	unsigned		head;
 	unsigned		tail;
 	u16			cur_post;
-	u16			last_poll;
 	void			*cur_edge;
 };
@@ -1070,7 +1069,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 		      const struct ib_send_wr **bad_wr);
 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 		      const struct ib_recv_wr **bad_wr);
-void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
 			  void *buffer, u32 length,
 			  struct mlx5_ib_qp_base *base);
@@ -3516,7 +3516,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		qp->sq.cur_post = 0;
 		if (qp->sq.wqe_cnt)
 			qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
-		qp->sq.last_poll = 0;
 		qp->db.db[MLX5_RCV_DBR] = 0;
 		qp->db.db[MLX5_SND_DBR] = 0;
 	}
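
For readers who want the net effect of the sw_comp() consolidation in one place, below is a minimal, self-contained sketch of the flush logic that the unified helper implements: a single function, parameterized by is_send, walks the outstanding entries of either the send or the receive work queue and reports each one as flushed, mirroring the calling pattern mlx5_ib_poll_sw_comp() uses for both QP lists above. The struct layouts, the WC_* status values, and the main() driver are simplified stand-ins invented for illustration; they are not the kernel's mlx5_ib or ib_core definitions, and the loop body only approximates the parts of sw_comp() that the hunks above do not show.

/*
 * Standalone model of the unified SW-completion flush (not kernel code).
 * Build with: cc -std=c99 -Wall sw_comp_sketch.c
 */
#include <stdio.h>

enum wc_status { WC_SUCCESS, WC_WR_FLUSH_ERR };   /* stand-in for ib_wc_status */

struct wq {                        /* simplified mlx5_ib_wq */
	unsigned int head;         /* producer index */
	unsigned int tail;         /* consumer index */
	unsigned int wqe_cnt;      /* queue depth, power of two */
	unsigned long long *wrid;  /* per-slot work-request IDs */
};

struct wc {                        /* simplified ib_wc */
	unsigned long long wr_id;
	enum wc_status status;
};

struct qp {                        /* simplified mlx5_ib_qp */
	struct wq sq;
	struct wq rq;
};

/* One helper for both directions; is_send selects the queue to flush. */
static void sw_comp(struct qp *qp, int num_entries, struct wc *wc,
		    int *npolled, int is_send)
{
	struct wq *wq = is_send ? &qp->sq : &qp->rq;
	unsigned int cur = wq->head - wq->tail;   /* outstanding WQEs */
	int np = *npolled;
	unsigned int i;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		/* Report each outstanding WQE as flushed with an error status. */
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = WC_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc++;
	}
	*npolled = np;
}

int main(void)
{
	unsigned long long sq_ids[4] = { 10, 11, 12, 13 };
	unsigned long long rq_ids[4] = { 20, 21, 22, 23 };
	struct qp qp = {
		.sq = { .head = 2, .tail = 0, .wqe_cnt = 4, .wrid = sq_ids },
		.rq = { .head = 1, .tail = 0, .wqe_cnt = 4, .wrid = rq_ids },
	};
	struct wc wc[8];
	int npolled = 0;

	/* Same call pattern as mlx5_ib_poll_sw_comp(): send list, then recv list. */
	sw_comp(&qp, 8, wc + npolled, &npolled, 1);
	sw_comp(&qp, 8, wc + npolled, &npolled, 0);

	for (int i = 0; i < npolled; i++)
		printf("flushed wr_id=%llu\n", wc[i].wr_id);
	return 0;
}

Running the sketch flushes the two posted send WQEs and the one posted receive WQE, which is the behavior the commit preserves while dropping the separate sw_send_comp()/sw_recv_comp() bodies and the last_poll bookkeeping that only the dead atomic path consumed.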