Commit 9ebbfe49 authored by Chao Leng, committed by Christoph Hellwig

nvme-tcp: avoid request double completion for concurrent nvme_tcp_timeout

Each namespace has its own request queue, so when request completion
takes a long time, several request queues can have timed-out requests
at once and nvme_tcp_timeout can run concurrently. Because requests
from different request queues may be queued on the same TCP queue,
these concurrent timeout handlers may all call nvme_tcp_stop_queue for
that TCP queue. The first caller clears NVME_TCP_Q_LIVE and proceeds
to stop the TCP queue (cancelling io_work), but the others see
NVME_TCP_Q_LIVE already cleared and go on to complete the requests
directly. Completing a request before io_work is fully cancelled can
lead to a use-after-free.
Add a mutex to serialize nvme_tcp_stop_queue.
Signed-off-by: Chao Leng <lengchao@huawei.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Parent 7674073b
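To make the race concrete, here is a minimal userspace sketch of the pattern the fix introduces. All names (queue_live, stop_queue, cancel_io_work) are hypothetical; pthreads and C11 atomics stand in for the kernel's test_and_clear_bit() and cancel_work_sync():

/*
 * Hypothetical userspace model of the serialized stop path, assuming
 * a 1 ms sleep approximates the latency of cancelling io_work.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool queue_live = true;   /* stands in for NVME_TCP_Q_LIVE */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static bool io_work_cancelled;          /* written only under queue_lock */

static void cancel_io_work(void)
{
	usleep(1000);                   /* model cancel_work_sync() latency */
	io_work_cancelled = true;
}

/* After the fix: every caller returns only once io_work is cancelled. */
static void stop_queue(void)
{
	pthread_mutex_lock(&queue_lock);
	if (atomic_exchange(&queue_live, false)) /* like test_and_clear_bit */
		cancel_io_work();
	pthread_mutex_unlock(&queue_lock);
}

static void *timeout_handler(void *arg)
{
	stop_queue();
	/* Safe: io_work can no longer touch the request we now complete. */
	printf("handler %ld: io_work_cancelled=%d\n",
	       (long)arg, io_work_cancelled);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, timeout_handler, (void *)1L);
	pthread_create(&t2, NULL, timeout_handler, (void *)2L);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Dropping the mutex_lock()/mutex_unlock() pair reintroduces the bug: the losing handler would return from stop_queue() while cancel_io_work() is still running in the winner, which is exactly the window this commit closes.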
@@ -76,6 +76,7 @@ struct nvme_tcp_queue {
 	struct work_struct	io_work;
 	int			io_cpu;
 
+	struct mutex		queue_lock;
 	struct mutex		send_mutex;
 	struct llist_head	req_list;
 	struct list_head	send_list;
@@ -1219,6 +1220,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 
 	sock_release(queue->sock);
 	kfree(queue->pdu);
+	mutex_destroy(&queue->queue_lock);
 }
 
 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
@@ -1380,6 +1382,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
 	int ret, rcv_pdu_size;
 
+	mutex_init(&queue->queue_lock);
 	queue->ctrl = ctrl;
 	init_llist_head(&queue->req_list);
 	INIT_LIST_HEAD(&queue->send_list);
@@ -1398,7 +1401,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
 	if (ret) {
 		dev_err(nctrl->device,
 			"failed to create socket: %d\n", ret);
-		return ret;
+		goto err_destroy_mutex;
 	}
 
 	/* Single syn retry */
@@ -1507,6 +1510,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
 err_sock:
 	sock_release(queue->sock);
 	queue->sock = NULL;
+err_destroy_mutex:
+	mutex_destroy(&queue->queue_lock);
 	return ret;
 }
 
@@ -1534,9 +1539,10 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
 
-	if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
-		return;
-	__nvme_tcp_stop_queue(queue);
+	mutex_lock(&queue->queue_lock);
+	if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+		__nvme_tcp_stop_queue(queue);
+	mutex_unlock(&queue->queue_lock);
 }
 
 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)