Commit 7dbf1297 authored by Raju Rangoju, committed by Greg Kroah-Hartman

nvmet-rdma: fix null dereference under heavy load

commit 5cbab6303b4791a3e6713dfe2c5fda6a867f9adc upstream.

Under heavy load, if we don't have any pre-allocated rsps left, we
dynamically allocate an rsp, but we do not actually allocate memory
for the nvme_completion (rsp->req.rsp). In that case, accessing the
pointer fields (req->rsp->status) in nvmet_req_init() results in a
crash.

To fix this, allocate the memory for the nvme_completion by calling
nvmet_rdma_alloc_rsp().

Fixes: 8407879c ("nvmet-rdma: fix possible bogus dereference under heavy load")

Cc: <stable@vger.kernel.org>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Raju Rangoju <rajur@chelsio.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent fa9184be
drivers/nvme/target/rdma.c

@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+		struct nvmet_rdma_rsp *r);
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+		struct nvmet_rdma_rsp *r);
 
 static const struct nvmet_fabrics_ops nvmet_rdma_ops;
 
@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
 	if (unlikely(!rsp)) {
-		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		int ret;
+
+		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
 		if (unlikely(!rsp))
 			return NULL;
+		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
+		if (unlikely(ret)) {
+			kfree(rsp);
+			return NULL;
+		}
+
 		rsp->allocated = true;
 	}
 
@@ -197,6 +209,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 	unsigned long flags;
 
 	if (unlikely(rsp->allocated)) {
+		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
 		kfree(rsp);
 		return;
 	}
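Editor's note: the pattern at issue is a pre-allocated object pool with a dynamic-allocation fallback, where the fallback path skipped the per-object setup (allocating the embedded completion) that pool entries receive at queue creation, and the put path skipped the matching teardown. The following stand-alone userspace sketch models that pattern and the fix under stated assumptions; every name in it (struct rsp, rsp_init, get_rsp, and so on) is hypothetical and only illustrates the idea, not the kernel implementation.

/*
 * Illustrative model of the bug and fix; names are hypothetical and
 * simplified, not the kernel's. Pool entries are fully initialized up
 * front, so the dynamic fallback must perform the same per-entry init
 * (and the put path the same teardown).
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct completion {		/* stands in for struct nvme_completion */
	unsigned short status;
};

struct rsp {			/* stands in for struct nvmet_rdma_rsp */
	struct completion *cqe;	/* stands in for rsp->req.rsp */
	bool allocated;		/* true if not from the pool */
	struct rsp *next;
};

static struct rsp *free_list;	/* stands in for queue->free_rsps */

/* Per-entry setup shared by pool entries and fallback allocations,
 * playing the role of nvmet_rdma_alloc_rsp(). */
static int rsp_init(struct rsp *r)
{
	r->cqe = malloc(sizeof(*r->cqe));
	return r->cqe ? 0 : -1;
}

/* Matching teardown, playing the role of nvmet_rdma_free_rsp(). */
static void rsp_destroy(struct rsp *r)
{
	free(r->cqe);
}

static struct rsp *get_rsp(void)
{
	struct rsp *r = free_list;

	if (r) {			/* fast path: pooled, already set up */
		free_list = r->next;
		return r;
	}
	r = calloc(1, sizeof(*r));	/* fallback under heavy load */
	if (!r)
		return NULL;
	if (rsp_init(r)) {		/* the fix: same init as pool entries */
		free(r);
		return NULL;
	}
	r->allocated = true;
	return r;
}

static void put_rsp(struct rsp *r)
{
	if (r->allocated) {		/* fallback entry: full teardown */
		rsp_destroy(r);
		free(r);
		return;
	}
	r->next = free_list;		/* pooled entry: back on the list */
	free_list = r;
}

int main(void)
{
	struct rsp *r = get_rsp();	/* empty pool forces the fallback */

	if (!r)
		return 1;
	r->cqe->status = 0;		/* safe now; faulted before the fix */
	printf("fallback rsp ok, allocated=%d\n", r->allocated);
	put_rsp(r);
	return 0;
}

Without the rsp_init() call in the fallback path, r->cqe would be uninitialized (or NULL after kzalloc), and the first store to r->cqe->status would fault, which is exactly the crash the commit message describes in nvmet_req_init().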