From fa46710f79d11e3d642a90aca16c255c8e8b74d1 Mon Sep 17 00:00:00 2001
From: Jiaran Zhang
Date: Thu, 12 Sep 2019 10:59:18 +0800
Subject: [PATCH] RDMA/hns: Bugfix for cqs lock dereferencing the null cq

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

The cq passed to hns_roce_lock_cqs() may be a NULL pointer, which may
result in access to an illegal address.

Feature or Bugfix: Bugfix

Signed-off-by: Jiaran Zhang
Reviewed-by: chenglang
Reviewed-by: liuyixian
Reviewed-by: Yang Yingliang
Signed-off-by: Yang Yingliang
---
 drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 19 ++++++++++++++-----
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 15 ++++++++-------
 drivers/infiniband/hw/hns/hns_roce_qp.c    | 22 ++++++++++++++++++++--
 3 files changed, 42 insertions(+), 14 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 6c0086beebaf..d9b5674ad539 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -3948,6 +3948,13 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
 }
 
+static void get_cqs(struct ib_qp *ibqp, struct hns_roce_cq **send_cq,
+		    struct hns_roce_cq **recv_cq)
+{
+	*send_cq = ibqp->send_cq ? to_hr_cq(ibqp->send_cq) : NULL;
+	*recv_cq = ibqp->recv_cq ? to_hr_cq(ibqp->recv_cq) : NULL;
+}
+
 int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
@@ -3967,14 +3974,16 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
 		return ret;
 	}
 
-	send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
-	recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
+	get_cqs(&hr_qp->ibqp, &send_cq, &recv_cq);
 
 	hns_roce_lock_cqs(send_cq, recv_cq);
 	if (!is_user) {
-		__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
-				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
-		if (send_cq != recv_cq)
+		if (recv_cq)
+			__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn,
+					       (hr_qp->ibqp.srq ?
+						to_hr_srq(hr_qp->ibqp.srq) :
+						NULL));
+		if (send_cq && send_cq != recv_cq)
 			__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
 	}
 	hns_roce_unlock_cqs(send_cq, recv_cq);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index b5f9a6d63986..f84a9cc85d2a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5479,15 +5479,16 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
 	if (cq_lock)
 		hns_roce_lock_cqs(send_cq, recv_cq);
 	list_del(&hr_qp->list);
-	if (send_cq)
-		list_del(&hr_qp->send_list);
-	if (recv_cq)
-		list_del(&hr_qp->recv_list);
+	list_del(&hr_qp->send_list);
+	list_del(&hr_qp->recv_list);
 
 	if (!is_user) {
-		__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
-				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
-		if (send_cq != recv_cq)
+		if (recv_cq)
+			__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
+					       (hr_qp->ibqp.srq ?
+						to_hr_srq(hr_qp->ibqp.srq) :
+						NULL));
+		if (send_cq && send_cq != recv_cq)
 			__hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
 	}
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index ee2b0f3112c1..3607b983a392 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1386,7 +1386,16 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
 		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
-	if (send_cq == recv_cq) {
+	if (unlikely(!send_cq && !recv_cq)) {
+		__acquire(&send_cq->lock);
+		__acquire(&recv_cq->lock);
+	} else if (unlikely(send_cq && !recv_cq)) {
+		spin_lock_irq(&send_cq->lock);
+		__acquire(&recv_cq->lock);
+	} else if (unlikely(!send_cq && recv_cq)) {
+		spin_lock_irq(&recv_cq->lock);
+		__acquire(&send_cq->lock);
+	} else if (send_cq == recv_cq) {
 		spin_lock_irq(&send_cq->lock);
 		__acquire(&recv_cq->lock);
 	} else if (send_cq->cqn < recv_cq->cqn) {
@@ -1403,7 +1412,16 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
 			 struct hns_roce_cq *recv_cq)
 			 __releases(&send_cq->lock) __releases(&recv_cq->lock)
 {
-	if (send_cq == recv_cq) {
+	if (unlikely(!send_cq && !recv_cq)) {
+		__release(&recv_cq->lock);
+		__release(&send_cq->lock);
+	} else if (unlikely(send_cq && !recv_cq)) {
+		spin_unlock(&send_cq->lock);
+		__release(&recv_cq->lock);
+	} else if (unlikely(!send_cq && recv_cq)) {
+		spin_unlock(&recv_cq->lock);
+		__release(&send_cq->lock);
+	} else if (send_cq == recv_cq) {
 		__release(&recv_cq->lock);
 		spin_unlock_irq(&send_cq->lock);
 	} else if (send_cq->cqn < recv_cq->cqn) {
-- 
GitLab
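
For reviewers, a minimal usage sketch (not part of the patch) of the convention the hunks above establish: either CQ pointer handed to hns_roce_lock_cqs()/hns_roce_unlock_cqs() may be NULL, and the CQ-clean calls are guarded accordingly. The wrapper name hr_qp_cq_clean_sketch() is hypothetical and the snippet assumes the driver's internal headers; all other identifiers come from the patch.

/*
 * Hypothetical sketch: how a destroy path is expected to use the
 * NULL-tolerant CQ lock helpers. Either CQ pointer may be absent;
 * hns_roce_lock_cqs()/hns_roce_unlock_cqs() handle every combination,
 * and the cq_clean calls are skipped for missing CQs.
 */
static void hr_qp_cq_clean_sketch(struct hns_roce_qp *hr_qp, bool is_user)
{
	struct hns_roce_cq *send_cq, *recv_cq;

	/* Same mapping as get_cqs(): either ib_qp CQ may be NULL. */
	send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
	recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;

	hns_roce_lock_cqs(send_cq, recv_cq);	/* safe even if both are NULL */

	if (!is_user) {
		if (recv_cq)
			__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
					       hr_qp->ibqp.srq ?
					       to_hr_srq(hr_qp->ibqp.srq) :
					       NULL);
		if (send_cq && send_cq != recv_cq)
			__hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
	}

	hns_roce_unlock_cqs(send_cq, recv_cq);
}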