Commit bb236dbe authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull more rdma fixes from Doug Ledford:
 "As per my previous pull request, there were two drivers that each had
  a rather large number of legitimate fixes still to be sent.

  As it turned out, I also missed a reasonably large set of fixes from
  one person across the stack that are all important fixes. All in all,
  the bnxt_re, i40iw, and Dan Carpenter are 3/4 to 2/3rds of this pull
  request.

  There are also some other random fixes here that I didn't send in the
  last pull request. This catches the rdma stack up on fixes through
  about the beginning of this week; any further fixes I'll batch up
  later in the -rc cycle. This gives us a good base for starting a
  for-next branch on -rc2.

  Summary:

   - i40iw fixes

   - bnxt_re fixes

   - Dan Carpenter bugfixes across the stack

   - ten more random fixes, no more than two from any one person"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (37 commits)
  RDMA/core: Initialize port_num in qp_attr
  RDMA/uverbs: Fix the check for port number
  IB/cma: Fix reference count leak when no ipv4 addresses are set
  RDMA/iser: don't send an rkey if all data is written as immadiate-data
  rxe: fix broken receive queue draining
  RDMA/qedr: Prevent memory overrun in verbs' user responses
  iw_cxgb4: don't use WR keys/addrs for 0 byte reads
  IB/mlx4: Fix CM REQ retries in paravirt mode
  IB/rdmavt: Setting of QP timeout can overflow jiffies computation
  IB/core: Fix sparse warnings
  RDMA/bnxt_re: Fix the value reported for local ack delay
  RDMA/bnxt_re: Report MISSED_EVENTS in req_notify_cq
  RDMA/bnxt_re: Fix return value of poll routine
  RDMA/bnxt_re: Enable atomics only if host bios supports
  RDMA/bnxt_re: Specify RDMA component when allocating stats context
  RDMA/bnxt_re: Fixed the max_rd_atomic support for initiator and destination QP
  RDMA/bnxt_re: Report supported value to IB stack in query_device
  RDMA/bnxt_re: Do not free the ctx_tbl entry if delete GID fails
  RDMA/bnxt_re: Fix WQE Size posted to HW to prevent it from throwing error
  RDMA/bnxt_re: Free doorbell page index (DPI) during dealloc ucontext
  ...
...
@@ -1033,6 +1033,8 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		} else
 			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
 						 qp_attr_mask);
+		qp_attr->port_num = id_priv->id.port_num;
+		*qp_attr_mask |= IB_QP_PORT;
 	} else
 		ret = -ENOSYS;
...
...
@@ -1296,7 +1296,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 	struct ib_uobject		*uobj;
 	struct ib_cq			*cq;
 	struct ib_ucq_object		*obj;
-	struct ib_uverbs_event_queue	*ev_queue;
 	int				ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
...
@@ -1313,7 +1312,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 	 */
 	uverbs_uobject_get(uobj);
 	cq = uobj->object;
-	ev_queue = cq->cq_context;
 	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
 
 	memset(&resp, 0, sizeof(resp));
...
@@ -1935,7 +1933,8 @@ static int modify_qp(struct ib_uverbs_file *file,
 		goto out;
 	}
 
-	if (!rdma_is_port_valid(qp->device, cmd->base.port_num)) {
+	if ((cmd->base.attr_mask & IB_QP_PORT) &&
+	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
 		ret = -EINVAL;
 		goto release_qp;
 	}
...
@@ -2088,7 +2087,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	struct ib_uverbs_destroy_qp	cmd;
 	struct ib_uverbs_destroy_qp_resp resp;
 	struct ib_uobject		*uobj;
-	struct ib_qp			*qp;
 	struct ib_uqp_object		*obj;
 	int				ret = -EINVAL;
...
@@ -2102,7 +2100,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	qp = uobj->object;
 	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
 	/*
 	 * Make sure we don't free the memory in remove_commit as we still
...
@@ -3004,7 +3001,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
 {
 	struct ib_uverbs_ex_destroy_wq		cmd = {};
 	struct ib_uverbs_ex_destroy_wq_resp	resp = {};
-	struct ib_wq			*wq;
 	struct ib_uobject		*uobj;
 	struct ib_uwq_object		*obj;
 	size_t required_cmd_sz;
...
@@ -3038,7 +3034,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	wq = uobj->object;
 	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
 	/*
 	 * Make sure we don't free the memory in remove_commit as we still
...
@@ -3728,10 +3723,8 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 	struct ib_uverbs_destroy_srq	cmd;
 	struct ib_uverbs_destroy_srq_resp resp;
 	struct ib_uobject		*uobj;
-	struct ib_srq			*srq;
 	struct ib_uevent_object		*obj;
 	int				ret = -EINVAL;
-	enum ib_srq_type		srq_type;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
...
@@ -3741,9 +3734,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	srq = uobj->object;
 	obj = container_of(uobj, struct ib_uevent_object, uobject);
-	srq_type = srq->srq_type;
 	/*
 	 * Make sure we don't free the memory in remove_commit as we still
 	 * needs the uobject memory to create the response.
...
...
@@ -51,6 +51,8 @@
 #define BNXT_RE_PAGE_SIZE_8M		BIT(23)
 #define BNXT_RE_PAGE_SIZE_1G		BIT(30)
 
+#define BNXT_RE_MAX_MR_SIZE		BIT(30)
+
 #define BNXT_RE_MAX_QPC_COUNT		(64 * 1024)
 #define BNXT_RE_MAX_MRW_COUNT		(64 * 1024)
 #define BNXT_RE_MAX_SRQC_COUNT		(64 * 1024)
...
@@ -60,6 +62,13 @@
 
 #define BNXT_RE_RQ_WQE_THRESHOLD	32
 
+/*
+ * Setting the default ack delay value to 16, which means
+ * the default timeout is approx. 260ms(4 usec * 2 ^(timeout))
+ */
+#define BNXT_RE_DEFAULT_ACK_DELAY	16
+
 struct bnxt_re_work {
 	struct work_struct	work;
 	unsigned long		event;
...
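Note: the ack-delay value above follows the usual IB encoding of 4 usec * 2^value; a quick standalone check of the default of 16 (illustrative snippet, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int ack_delay = 16;            /* BNXT_RE_DEFAULT_ACK_DELAY */
	unsigned long usec = 4UL << ack_delay;  /* 4 usec * 2^16 = 262144 usec */

	/* prints ~262 ms, i.e. the "approx. 260ms" mentioned in the comment */
	printf("%lu usec ~= %lu ms\n", usec, usec / 1000);
	return 0;
}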
...
@@ -145,10 +145,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
 	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
 			    (u8 *)&ib_attr->sys_image_guid);
-	ib_attr->max_mr_size = ~0ull;
-	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_8K |
-				 BNXT_RE_PAGE_SIZE_64K | BNXT_RE_PAGE_SIZE_2M |
-				 BNXT_RE_PAGE_SIZE_8M | BNXT_RE_PAGE_SIZE_1G;
+	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
+	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
 
 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
...
@@ -174,9 +172,11 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->max_mr = dev_attr->max_mr;
 	ib_attr->max_pd = dev_attr->max_pd;
 	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
-	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom;
-	ib_attr->atomic_cap = IB_ATOMIC_HCA;
-	ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
+	if (dev_attr->is_atomic) {
+		ib_attr->atomic_cap = IB_ATOMIC_HCA;
+		ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+	}
 
 	ib_attr->max_ee_rd_atom = 0;
 	ib_attr->max_res_rd_atom = 0;
...
@@ -201,7 +201,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
 
 	ib_attr->max_pkeys = 1;
-	ib_attr->local_ca_ack_delay = 0;
+	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
 	return 0;
 }
...
@@ -390,15 +390,17 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
 		return -EINVAL;
 	ctx->refcnt--;
 	if (!ctx->refcnt) {
-		rc = bnxt_qplib_del_sgid
-			(sgid_tbl,
-			 &sgid_tbl->tbl[ctx->idx], true);
-		if (rc)
+		rc = bnxt_qplib_del_sgid(sgid_tbl,
+					 &sgid_tbl->tbl[ctx->idx],
+					 true);
+		if (rc) {
 			dev_err(rdev_to_dev(rdev),
 				"Failed to remove GID: %#x", rc);
-		ctx_tbl = sgid_tbl->ctx;
-		ctx_tbl[ctx->idx] = NULL;
-		kfree(ctx);
+		} else {
+			ctx_tbl = sgid_tbl->ctx;
+			ctx_tbl[ctx->idx] = NULL;
+			kfree(ctx);
+		}
 	}
 	} else {
 		return -EINVAL;
...
@@ -588,10 +590,10 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
 	/* Create a fence MW only for kernel consumers */
 	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
-	if (!mw) {
+	if (IS_ERR(mw)) {
 		dev_err(rdev_to_dev(rdev),
 			"Failed to create fence-MW for PD: %p\n", pd);
-		rc = -EINVAL;
+		rc = PTR_ERR(mw);
 		goto fail;
 	}
 	fence->mw = mw;
...
@@ -612,30 +614,13 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
 	int rc;
 
 	bnxt_re_destroy_fence_mr(pd);
-	if (ib_pd->uobject && pd->dpi.dbr) {
-		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
-		struct bnxt_re_ucontext *ucntx;
 
-		/* Free DPI only if this is the first PD allocated by the
-		 * application and mark the context dpi as NULL
-		 */
-		ucntx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
-
-		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
-					    &rdev->qplib_res.dpi_tbl,
-					    &pd->dpi);
+	if (pd->qplib_pd.id) {
+		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
+					   &rdev->qplib_res.pd_tbl,
+					   &pd->qplib_pd);
 		if (rc)
-			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW DPI");
-			/* Don't fail, continue*/
-		ucntx->dpi = NULL;
-	}
-
-	rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
-				   &rdev->qplib_res.pd_tbl,
-				   &pd->qplib_pd);
-	if (rc) {
-		dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
-		return rc;
+			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
 	}
 
 	kfree(pd);
...
@@ -667,23 +652,22 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
 	if (udata) {
 		struct bnxt_re_pd_resp resp;
 
-		if (!ucntx->dpi) {
+		if (!ucntx->dpi.dbr) {
 			/* Allocate DPI in alloc_pd to avoid failing of
 			 * ibv_devinfo and family of application when DPIs
 			 * are depleted.
 			 */
 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
-						 &pd->dpi, ucntx)) {
+						 &ucntx->dpi, ucntx)) {
 				rc = -ENOMEM;
 				goto dbfail;
 			}
-			ucntx->dpi = &pd->dpi;
 		}
 
 		resp.pdid = pd->qplib_pd.id;
 		/* Still allow mapping this DBR to the new user PD. */
-		resp.dpi = ucntx->dpi->dpi;
-		resp.dbr = (u64)ucntx->dpi->umdbr;
+		resp.dpi = ucntx->dpi.dpi;
+		resp.dbr = (u64)ucntx->dpi.umdbr;
 
 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
 		if (rc) {
...
@@ -960,7 +944,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 		qplib_qp->rq.nmap = umem->nmap;
 	}
 
-	qplib_qp->dpi = cntx->dpi;
+	qplib_qp->dpi = &cntx->dpi;
 	return 0;
 rqfail:
 	ib_umem_release(qp->sumem);
...
@@ -1530,13 +1514,24 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
 		qp->qplib_qp.modify_flags |=
 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
-		qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic;
+		/* Cap the max_rd_atomic to device max */
+		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
+						   dev_attr->max_qp_rd_atom);
 	}
 	if (qp_attr_mask & IB_QP_SQ_PSN) {
 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
 		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
 	}
 	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+		if (qp_attr->max_dest_rd_atomic >
+		    dev_attr->max_qp_init_rd_atom) {
+			dev_err(rdev_to_dev(rdev),
+				"max_dest_rd_atomic requested%d is > dev_max%d",
+				qp_attr->max_dest_rd_atomic,
+				dev_attr->max_qp_init_rd_atom);
+			return -EINVAL;
+		}
+
 		qp->qplib_qp.modify_flags |=
 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
 		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
...
@@ -2403,7 +2398,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
 		}
 		cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
 		cq->qplib_cq.nmap = cq->umem->nmap;
-		cq->qplib_cq.dpi = uctx->dpi;
+		cq->qplib_cq.dpi = &uctx->dpi;
 	} else {
 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
 		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
...
@@ -2905,6 +2900,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
 	spin_lock_irqsave(&cq->cq_lock, flags);
 	budget = min_t(u32, num_entries, cq->max_cql);
+	num_entries = budget;
 	if (!cq->cql) {
 		dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
 		goto exit;
...
@@ -3031,6 +3027,11 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
 	else if (ib_cqn_flags & IB_CQ_SOLICITED)
 		type = DBR_DBR_TYPE_CQ_ARMSE;
 
+	/* Poll to see if there are missed events */
+	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
+		return 1;
+
 	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
 
 	return 0;
...
@@ -3245,6 +3246,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	struct scatterlist *sg;
 	int entry;
 
+	if (length > BNXT_RE_MAX_MR_SIZE) {
+		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
+			length, BNXT_RE_MAX_MR_SIZE);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
...
@@ -3388,8 +3395,26 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
 						     struct bnxt_re_ucontext,
 						     ib_uctx);
+	struct bnxt_re_dev *rdev = uctx->rdev;
+	int rc = 0;
+
 	if (uctx->shpg)
 		free_page((unsigned long)uctx->shpg);
+
+	if (uctx->dpi.dbr) {
+		/* Free DPI only if this is the first PD allocated by the
+		 * application and mark the context dpi as NULL
+		 */
+		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+					    &rdev->qplib_res.dpi_tbl,
+					    &uctx->dpi);
+		if (rc)
+			dev_err(rdev_to_dev(rdev), "Deallocte HW DPI failed!");
+			/* Don't fail, continue*/
+		uctx->dpi.dbr = NULL;
+	}
+
 	kfree(uctx);
 	return 0;
 }
...
...
@@ -59,7 +59,6 @@ struct bnxt_re_pd {
 	struct bnxt_re_dev	*rdev;
 	struct ib_pd		ib_pd;
 	struct bnxt_qplib_pd	qplib_pd;
-	struct bnxt_qplib_dpi	dpi;
 	struct bnxt_re_fence_data fence;
 };
...
@@ -127,7 +126,7 @@ struct bnxt_re_mw {
 struct bnxt_re_ucontext {
 	struct bnxt_re_dev	*rdev;
 	struct ib_ucontext	ib_uctx;
-	struct bnxt_qplib_dpi	*dpi;
+	struct bnxt_qplib_dpi	dpi;
 	void			*shpg;
 	spinlock_t		sh_lock;	/* protect shpg */
 };
...
...
@@ -333,6 +333,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
 	req.update_period_ms = cpu_to_le32(1000);
 	req.stats_dma_addr = cpu_to_le64(dma_map);
+	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
 			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
 	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
...
...
@@ -1128,6 +1128,11 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
 		}
 		/* Each SGE entry = 1 WQE size16 */
 		wqe_size16 = wqe->num_sge;
+		/* HW requires wqe size has room for atleast one SGE even if
+		 * none was supplied by ULP
+		 */
+		if (!wqe->num_sge)
+			wqe_size16++;
 	}
 
 	/* Specifics */
...
@@ -1364,6 +1369,11 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
 	rqe->flags = wqe->flags;
 	rqe->wqe_size = wqe->num_sge +
 			((offsetof(typeof(*rqe), data) + 15) >> 4);
+	/* HW requires wqe size has room for atleast one SGE even if none
+	 * was supplied by ULP
+	 */
+	if (!wqe->num_sge)
+		rqe->wqe_size++;
 
 	/* Supply the rqe->wr_id index to the wr_id_tbl for now */
 	rqe->wr_id[0] = cpu_to_le32(sw_prod);
...
@@ -1885,6 +1895,25 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
 	return rc;
 }
 
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
+{
+	struct cq_base *hw_cqe, **hw_cqe_ptr;
+	unsigned long flags;
+	u32 sw_cons, raw_cons;
+	bool rc = true;
+
+	spin_lock_irqsave(&cq->hwq.lock, flags);
+	raw_cons = cq->hwq.cons;
+	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
+	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
+	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
+
+	/* Check for Valid bit. If the CQE is valid, return false */
+	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
+	spin_unlock_irqrestore(&cq->hwq.lock, flags);
+	return rc;
+}
+
 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
 						struct cq_res_raweth_qp1 *hwcqe,
 						struct bnxt_qplib_cqe **pcqe,
...
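For context, bnxt_qplib_is_cq_empty() is what lets bnxt_re_req_notify_cq() honour IB_CQ_REPORT_MISSED_EVENTS: a return value greater than zero from ib_req_notify_cq() tells the consumer that completions may already be queued, so it must poll again instead of sleeping. A minimal consumer-side sketch of that pattern (hypothetical ULP code, not part of this series):

static void drain_and_rearm(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		/* Consume everything currently in the CQ */
		while (ib_poll_cq(cq, 1, &wc) > 0)
			; /* handle the completion in wc here */
		/*
		 * Re-arm; a return > 0 with IB_CQ_REPORT_MISSED_EVENTS means
		 * new CQEs raced in, so loop and poll again before sleeping.
		 */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				       IB_CQ_REPORT_MISSED_EVENTS) > 0);
}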
...
@@ -449,6 +449,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
 		       int num, struct bnxt_qplib_qp **qp);
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
...
...
@@ -51,6 +51,19 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
 						     0, 0, 0, 0, 0, 0, 0, 0 } };
 
 /* Device */
+
+static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
+{
+	int rc;
+	u16 pcie_ctl2;
+
+	rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
+				       &pcie_ctl2);
+	if (rc)
+		return false;
+	return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
+}
+
 int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 			    struct bnxt_qplib_dev_attr *attr)
 {
...
@@ -81,6 +94,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	/* Extract the context from the side buffer */
 	attr->max_qp = le32_to_cpu(sb->max_qp);
+	/* max_qp value reported by FW for PF doesn't include the QP1 for PF */
+	attr->max_qp += 1;
 	attr->max_qp_rd_atom =
 		sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
 		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
...
@@ -129,6 +144,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
 	}
 
+	attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
 bail:
 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
 	return rc;
...
...
@@ -42,6 +42,8 @@
 #define BNXT_QPLIB_RESERVED_QP_WRS	128
 
+#define PCI_EXP_DEVCTL2_ATOMIC_REQ	0x0040
+
 struct bnxt_qplib_dev_attr {
 	char				fw_ver[32];
 	u16				max_sgid;
...
@@ -70,6 +72,7 @@ struct bnxt_qplib_dev_attr {
 	u32				max_inline_data;
 	u32				l2_db_size;
 	u8				tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
+	bool				is_atomic;
 };
 
 struct bnxt_qplib_pd {
...
...
@@ -718,7 +718,7 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
 	struct iwch_mr *mhp;
 	u32 mmid;
 	u32 stag = 0;
-	int ret = 0;
+	int ret = -ENOMEM;
 
 	if (mr_type != IB_MR_TYPE_MEM_REG ||
 	    max_num_sg > T3_MAX_FASTREG_DEPTH)
...
@@ -731,10 +731,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
 		goto err;
 
 	mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
-	if (!mhp->pages) {
-		ret = -ENOMEM;
+	if (!mhp->pages)
 		goto pl_err;
-	}
 
 	mhp->rhp = rhp;
 	ret = iwch_alloc_pbl(mhp, max_num_sg);
...
@@ -751,7 +749,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
 	mhp->attr.state = 1;
 	mmid = (stag) >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+	ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+	if (ret)
 		goto err3;
 
 	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
...
...
@@ -963,6 +963,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		goto err3;
 
 	if (ucontext) {
+		ret = -ENOMEM;
 		mm = kmalloc(sizeof *mm, GFP_KERNEL);
 		if (!mm)
 			goto err4;
...
...
@@ -569,7 +569,7 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 {
 	if (wr->num_sge > 1)
 		return -EINVAL;
-	if (wr->num_sge) {
+	if (wr->num_sge && wr->sg_list[0].length) {
 		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
 		wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
 							>> 32));
...
...
@@ -527,6 +527,7 @@ enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
 int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
 void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
 
+void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev);
 void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
 void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
 void i40iw_rem_devusecount(struct i40iw_device *iwdev);
...
...
@@ -3487,7 +3487,8 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
 	if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
 	     (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
 	     (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
-	     (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
+	     (last_ae == I40IW_AE_LLP_CONNECTION_RESET) ||
+	      iwdev->reset)) {
 		issue_close = 1;
 		iwqp->cm_id = NULL;
 		if (!iwqp->flush_issued) {
...
@@ -4265,6 +4266,8 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
 		cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
 		attr.qp_state = IB_QPS_ERR;
 		i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+		if (iwdev->reset)
+			i40iw_cm_disconn(cm_node->iwqp);
 		i40iw_rem_ref_cm_node(cm_node);
 	}
 }
...
...
@@ -1970,6 +1970,8 @@ static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
 		ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
 	}
 
+	cqp->process_cqp_sds = i40iw_update_sds_noccq;
+
 	return ret_code;
 }
...
...
@@ -243,6 +243,8 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
 	if (free_hwcqp)
 		dev->cqp_ops->cqp_destroy(dev->cqp);
 
+	i40iw_cleanup_pending_cqp_op(iwdev);
+
 	i40iw_free_dma_mem(dev->hw, &cqp->sq);
 	kfree(cqp->scratch_array);
 	iwdev->cqp.scratch_array = NULL;
...
@@ -274,13 +276,12 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
 /**
  * i40iw_destroy_aeq - destroy aeq
  * @iwdev: iwarp device
- * @reset: true if called before reset
  *
  * Issue a destroy aeq request and
  * free the resources associated with the aeq
  * The function is called during driver unload
  */
-static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
+static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
 {
 	enum i40iw_status_code status = I40IW_ERR_NOT_READY;
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
...
@@ -288,7 +289,7 @@ static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
 	if (!iwdev->msix_shared)
 		i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
 
-	if (reset)
+	if (iwdev->reset)
 		goto exit;
 
 	if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
...
@@ -304,19 +305,17 @@ static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
  * i40iw_destroy_ceq - destroy ceq
  * @iwdev: iwarp device
  * @iwceq: ceq to be destroyed
- * @reset: true if called before reset
  *
  * Issue a destroy ceq request and
  * free the resources associated with the ceq
  */
 static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
-			      struct i40iw_ceq *iwceq,
-			      bool reset)
+			      struct i40iw_ceq *iwceq)
 {
 	enum i40iw_status_code status;
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 
-	if (reset)
+	if (iwdev->reset)
 		goto exit;
 
 	status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
...
@@ -335,12 +334,11 @@ static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
 /**
  * i40iw_dele_ceqs - destroy all ceq's
  * @iwdev: iwarp device
- * @reset: true if called before reset
  *
  * Go through all of the device ceq's and for each ceq
  * disable the ceq interrupt and destroy the ceq
  */
-static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
+static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
 {
 	u32 i = 0;
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
...
@@ -349,32 +347,31 @@ static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
 	if (iwdev->msix_shared) {
 		i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
-		i40iw_destroy_ceq(iwdev, iwceq, reset);
+		i40iw_destroy_ceq(iwdev, iwceq);
 		iwceq++;
 		i++;
 	}
 
 	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
 		i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
-		i40iw_destroy_ceq(iwdev, iwceq, reset);
+		i40iw_destroy_ceq(iwdev, iwceq);
 	}
 }
 
 /**
  * i40iw_destroy_ccq - destroy control cq
  * @iwdev: iwarp device
- * @reset: true if called before reset
  *
  * Issue destroy ccq request and
  * free the resources associated with the ccq
  */
-static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset)
+static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
 {
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 	struct i40iw_ccq *ccq = &iwdev->ccq;
 	enum i40iw_status_code status = 0;
 
-	if (!reset)
+	if (!iwdev->reset)
 		status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
 	if (status)
 		i40iw_pr_err("ccq destroy failed %d\n", status);
...
@@ -810,7 +807,7 @@ static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
 		iwceq->msix_idx = msix_vec->idx;
 		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
 		if (status) {
-			i40iw_destroy_ceq(iwdev, iwceq, false);
+			i40iw_destroy_ceq(iwdev, iwceq);
 			break;
 		}
 		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
...
@@ -912,7 +909,7 @@ static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
 	status = i40iw_configure_aeq_vector(iwdev);
 	if (status) {
-		i40iw_destroy_aeq(iwdev, false);
+		i40iw_destroy_aeq(iwdev);
 		return status;
 	}
...
@@ -1442,12 +1439,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
 /**
  * i40iw_deinit_device - clean up the device resources
  * @iwdev: iwarp device
- * @reset: true if called before reset
  *
  * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
  * destroy the device queues and free the pble and the hmc objects
  */
-static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
+static void i40iw_deinit_device(struct i40iw_device *iwdev)
 {
 	struct i40e_info *ldev = iwdev->ldev;
...
@@ -1464,7 +1460,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
 		i40iw_destroy_rdma_device(iwdev->iwibdev);
 		/* fallthrough */
 	case IP_ADDR_REGISTERED:
-		if (!reset)
+		if (!iwdev->reset)
 			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
 		/* fallthrough */
 	case INET_NOTIFIER:
...
@@ -1474,26 +1470,26 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
 			unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
 		}
 		/* fallthrough */
-	case PBLE_CHUNK_MEM:
-		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
-		/* fallthrough */
 	case CEQ_CREATED:
-		i40iw_dele_ceqs(iwdev, reset);
+		i40iw_dele_ceqs(iwdev);
 		/* fallthrough */
 	case AEQ_CREATED:
-		i40iw_destroy_aeq(iwdev, reset);
+		i40iw_destroy_aeq(iwdev);
 		/* fallthrough */
 	case IEQ_CREATED:
-		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
+		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
 		/* fallthrough */
 	case ILQ_CREATED:
-		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
+		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
 		/* fallthrough */
 	case CCQ_CREATED:
-		i40iw_destroy_ccq(iwdev, reset);
+		i40iw_destroy_ccq(iwdev);
+		/* fallthrough */
+	case PBLE_CHUNK_MEM:
+		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
 		/* fallthrough */
 	case HMC_OBJS_CREATED:
-		i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
+		i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
 		/* fallthrough */
 	case CQP_CREATED:
 		i40iw_destroy_cqp(iwdev, true);
...
@@ -1670,6 +1666,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
 		status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
 		if (status)
 			break;
+		iwdev->init_state = PBLE_CHUNK_MEM;
 		iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
 		i40iw_register_notifiers();
 		iwdev->init_state = INET_NOTIFIER;
...
@@ -1693,7 +1690,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
 	} while (0);
 
 	i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
-	i40iw_deinit_device(iwdev, false);
+	i40iw_deinit_device(iwdev);
 	return -ERESTART;
 }
...
@@ -1774,9 +1771,12 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
 	iwdev = &hdl->device;
 	iwdev->closing = true;
 
+	if (reset)
+		iwdev->reset = true;
+
 	i40iw_cm_disconnect_all(iwdev);
 	destroy_workqueue(iwdev->virtchnl_wq);
-	i40iw_deinit_device(iwdev, reset);
+	i40iw_deinit_device(iwdev);
 }
 
 /**
...
...
@@ -408,6 +408,9 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
 	set_64bit_val(wqe, 0, info->paddr);
 	set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
 	set_64bit_val(wqe, 16, header[0]);
+
+	/* Ensure all data is written before writing valid bit */
+	wmb();
 	set_64bit_val(wqe, 24, header[1]);
 
 	i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
...
@@ -1411,10 +1414,10 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
 	if (!list_empty(rxlist)) {
 		tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
-		plist = &tmpbuf->list;
 		while ((struct list_head *)tmpbuf != rxlist) {
 			if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
 				break;
+			plist = &tmpbuf->list;
 			tmpbuf = (struct i40iw_puda_buf *)plist->next;
 		}
 		/* Insert buf before tmpbuf */
...
...
@@ -337,6 +337,7 @@ struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait
  */
 void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
 {
+	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
 	unsigned long flags;
 
 	if (cqp_request->dynamic) {
...
@@ -350,6 +351,7 @@ void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp
 		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
 		spin_unlock_irqrestore(&cqp->req_lock, flags);
 	}
+	wake_up(&iwdev->close_wq);
 }
 
 /**
...
@@ -364,6 +366,56 @@ void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
 	i40iw_free_cqp_request(cqp, cqp_request);
 }
 
+/**
+ * i40iw_free_pending_cqp_request -free pending cqp request objs
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
+					   struct i40iw_cqp_request *cqp_request)
+{
+	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
+
+	if (cqp_request->waiting) {
+		cqp_request->compl_info.error = true;
+		cqp_request->request_done = true;
+		wake_up(&cqp_request->waitq);
+	}
+	i40iw_put_cqp_request(cqp, cqp_request);
+	wait_event_timeout(iwdev->close_wq,
+			   !atomic_read(&cqp_request->refcount),
+			   1000);
+}
+
+/**
+ * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
+ * @iwdev: iwarp device
+ */
+void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
+{
+	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+	struct i40iw_cqp *cqp = &iwdev->cqp;
+	struct i40iw_cqp_request *cqp_request = NULL;
+	struct cqp_commands_info *pcmdinfo = NULL;
+	u32 i, pending_work, wqe_idx;
+
+	pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
+	wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
+	for (i = 0; i < pending_work; i++) {
+		cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
+		if (cqp_request)
+			i40iw_free_pending_cqp_request(cqp, cqp_request);
+		wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
+	}
+
+	while (!list_empty(&dev->cqp_cmd_head)) {
+		pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
+		cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
+		if (cqp_request)
+			i40iw_free_pending_cqp_request(cqp, cqp_request);
+	}
+}
+
 /**
  * i40iw_free_qp - callback after destroy cqp completes
  * @cqp_request: cqp request for destroy qp
...
@@ -546,8 +598,12 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
 	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
 	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
 	status = i40iw_handle_cqp_op(iwdev, cqp_request);
-	if (status)
-		i40iw_pr_err("CQP-OP Destroy QP fail");
+	if (!status)
+		return;
+
+	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+	i40iw_rem_devusecount(iwdev);
 }
 
 /**
...
...
@@ -426,9 +426,13 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
 			     struct i40iw_qp *iwqp,
 			     u32 qp_num)
 {
+	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
+
 	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
 	if (qp_num)
 		i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
+	if (iwpbl->pbl_allocated)
+		i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
 	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
 	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
 	kfree(iwqp->kqp.wrid_mem);
...
@@ -483,7 +487,7 @@ static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
 			       struct i40iw_qp *iwqp,
 			       struct i40iw_qp_init_info *init_info)
 {
-	struct i40iw_pbl *iwpbl = iwqp->iwpbl;
+	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
 	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
 
 	iwqp->page = qpmr->sq_page;
...
@@ -688,19 +692,22 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 			ucontext = to_ucontext(ibpd->uobject->context);
 
 			if (req.user_wqe_buffers) {
+				struct i40iw_pbl *iwpbl;
+
 				spin_lock_irqsave(
 				    &ucontext->qp_reg_mem_list_lock, flags);
-				iwqp->iwpbl = i40iw_get_pbl(
+				iwpbl = i40iw_get_pbl(
 				    (unsigned long)req.user_wqe_buffers,
 				    &ucontext->qp_reg_mem_list);
 				spin_unlock_irqrestore(
 				    &ucontext->qp_reg_mem_list_lock, flags);
 
-				if (!iwqp->iwpbl) {
+				if (!iwpbl) {
 					err_code = -ENODATA;
 					i40iw_pr_err("no pbl info\n");
 					goto error;
 				}
+				memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
 			}
 		}
 		err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
...
@@ -1161,8 +1168,10 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
 		memset(&req, 0, sizeof(req));
 		iwcq->user_mode = true;
 		ucontext = to_ucontext(context);
-		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req)))
+		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
+			err_code = -EFAULT;
 			goto cq_free_resources;
+		}
 
 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
 		iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
...
@@ -2063,7 +2072,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
 			ucontext = to_ucontext(ibpd->uobject->context);
 			i40iw_del_memlist(iwmr, ucontext);
 		}
-		if (iwpbl->pbl_allocated)
+		if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
 			i40iw_free_pble(iwdev->pble_rsrc, palloc);
 		kfree(iwmr);
 		return 0;
...
...
@@ -170,7 +170,7 @@ struct i40iw_qp {
 	struct i40iw_qp_kmode kqp;
 	struct i40iw_dma_mem host_ctx;
 	struct timer_list terminate_timer;
-	struct i40iw_pbl *iwpbl;
+	struct i40iw_pbl iwpbl;
 	struct i40iw_dma_mem q2_ctx_mem;
 	struct i40iw_dma_mem ietf_mem;
 	struct completion sq_drained;
...
...
@@ -323,6 +323,9 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
 	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
 	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
 		sl_cm_id = get_local_comm_id(mad);
+		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
+		if (id)
+			goto cont;
 		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
 		if (IS_ERR(id)) {
 			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
...
@@ -343,6 +346,7 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
 		return -EINVAL;
 	}
 
+cont:
 	set_local_comm_id(mad, id->pv_cm_id);
 
 	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
...
...
@@ -835,7 +835,7 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 			   access_flags, 0);
 	err = PTR_ERR_OR_ZERO(*umem);
 	if (err < 0) {
-		mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
+		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
 		return err;
 	}
...
...
@@ -744,7 +744,8 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
 	if (is_uctx_pd) {
 		ocrdma_release_ucontext_pd(uctx);
 	} else {
-		status = _ocrdma_dealloc_pd(dev, pd);
+		if (_ocrdma_dealloc_pd(dev, pd))
+			pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__);
 	}
 exit:
 	return ERR_PTR(status);
...
@@ -1901,6 +1902,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
 		goto err;
 
 	if (udata == NULL) {
+		status = -ENOMEM;
 		srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
 					     GFP_KERNEL);
 		if (srq->rqe_wr_id_tbl == NULL)
...
...
@@ -53,6 +53,14 @@
 
 #define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
 
+static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
+					size_t len)
+{
+	size_t min_len = min_t(size_t, len, udata->outlen);
+
+	return ib_copy_to_udata(udata, src, min_len);
+}
+
 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
 {
 	if (index > QEDR_ROCE_PKEY_TABLE_LEN)
...
@@ -378,7 +386,7 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
 	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
 	uresp.max_cqes = QEDR_MAX_CQES;
 
-	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (rc)
 		goto err;
...
@@ -499,7 +507,7 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
 	uresp.pd_id = pd_id;
 
-	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (rc) {
 		DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
 		dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
...
@@ -729,7 +737,7 @@ static int qedr_copy_cq_uresp(struct qedr_dev *dev,
 	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
 	uresp.icid = cq->icid;
 
-	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (rc)
 		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
...
@@ -1238,7 +1246,7 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
 	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
 	uresp.qp_id = qp->qp_id;
 
-	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (rc)
 		DP_ERR(dev,
 		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
...
...
@@ -1258,9 +1258,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (attr_mask & IB_QP_TIMEOUT) {
 		qp->timeout = attr->timeout;
-		qp->timeout_jiffies =
-			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
-				1000UL);
+		qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
 	}
 
 	if (attr_mask & IB_QP_QKEY)
...
...
@@ -1219,6 +1219,9 @@ void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
 		kfree_skb(skb);
 	}
 
+	if (notify)
+		return;
+
 	while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
 		advance_consumer(qp->rq.queue);
 }
...
...
@@ -914,6 +914,9 @@ static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	spin_unlock_irqrestore(&rq->producer_lock, flags);
 
+	if (qp->resp.state == QP_STATE_ERROR)
+		rxe_run_task(&qp->resp.task, 1);
+
 err1:
 	return err;
 }
...
...
@@ -2239,6 +2239,7 @@ static struct net_device *ipoib_add_port(const char *format,
 		goto register_failed;
 	}
 
+	result = -ENOMEM;
 	if (ipoib_cm_add_mode_attr(priv->dev))
 		goto sysfs_failed;
 	if (ipoib_add_pkey_attr(priv->dev))
...
...
@@ -137,8 +137,10 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 	if (unsol_sz < edtl) {
 		hdr->flags     |= ISER_WSV;
-		hdr->write_stag = cpu_to_be32(mem_reg->rkey);
-		hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+		if (buf_out->data_len > imm_sz) {
+			hdr->write_stag = cpu_to_be32(mem_reg->rkey);
+			hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+		}
 
 		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
 			 "VA:%#llX + unsol:%d\n",
...
...
@@ -205,11 +205,13 @@ static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
 	dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
 	if (dev) {
 		ip4 = in_dev_get(dev);
-		if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) {
+		if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
 			ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
 					       (struct in6_addr *)gid);
+
+		if (ip4)
 			in_dev_put(ip4);
-		}
+
 		dev_put(dev);
 	}
 }
...
...
@@ -647,6 +647,20 @@ static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
 	return len >> qp->log_pmtu;
 }
 
+/**
+ * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
+ * @timeout - timeout input(0 - 31).
+ *
+ * Return a timeout value in jiffies.
+ */
+static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
+{
+	if (timeout > 31)
+		timeout = 31;
+
+	return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
+}
+
 extern const int ib_rvt_state_ops[];
 
 struct rvt_dev_info;
...
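As a sanity check of the helper above, a worked calculation assuming HZ == 1000 (illustration only, not part of the patch):

/*
 * rvt_timeout_to_jiffies(14)
 *   = usecs_to_jiffies(1U << 14) * 4096UL / 1000UL
 *   = ~17 jiffies * 4096 / 1000
 *   = ~69 jiffies (~69 ms), close to the nominal IB encoding
 *     4.096 usec * 2^14 ~= 67.1 ms.
 * Inputs above 31 are clamped to 31, which avoids the unsigned long
 * overflow that the old open-coded "4096UL * (1UL << timeout)"
 * computation in rvt_modify_qp() could hit.
 */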