Commit 984824db authored by Christoph Hellwig, committed by Jens Axboe

io_uring: don't use ERR_PTR for user pointers

ERR_PTR abuses the high bits of a pointer to transport error information.
This is only safe for kernel pointers and not user pointers.  Fix
io_buffer_select and its helpers to just return NULL for failure and get
rid of this abuse.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20220518084005.3255380-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent 20cbd21d
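As background, the following minimal userspace sketch models the ERR_PTR/IS_ERR scheme from include/linux/err.h and the collision the commit message describes. It is an illustration only, not part of this patch; the -42 address is an arbitrary example value.

/* Userspace model of the kernel's error-pointer encoding. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* "Error" means the value falls in the top MAX_ERRNO bytes. */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

int main(void)
{
	/* Safe for kernel pointers: no kernel object lives in that range. */
	void *err = ERR_PTR(-ENOBUFS);
	printf("ERR_PTR(-ENOBUFS): IS_ERR=%d err=%ld\n",
	       IS_ERR(err), PTR_ERR(err));

	/*
	 * A user pointer, e.g. u64_to_user_ptr(buf->addr) from a provided
	 * buffer ring, is an arbitrary value chosen by userspace.  If it
	 * happens to land in the top MAX_ERRNO bytes of the address space,
	 * a valid buffer address is indistinguishable from an error code.
	 */
	void *user_ptr = (void *)(uintptr_t)-42;
	printf("user address:      IS_ERR=%d err=%ld\n",
	       IS_ERR(user_ptr), PTR_ERR(user_ptr));
	return 0;
}

Hence the patch returns NULL on failure instead: callers map NULL to -ENOBUFS (or to ERR_PTR(-ENOBUFS) in __io_import_iovec, whose return value is a kernel pointer, where the encoding remains legitimate).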
@@ -3512,11 +3512,8 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
 }
 
 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
-					      struct io_buffer_list *bl,
-					      unsigned int issue_flags)
+					      struct io_buffer_list *bl)
 {
-	void __user *ret = ERR_PTR(-ENOBUFS);
-
 	if (!list_empty(&bl->buf_list)) {
 		struct io_buffer *kbuf;
 
@@ -3527,11 +3524,9 @@ static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
 		req->flags |= REQ_F_BUFFER_SELECTED;
 		req->kbuf = kbuf;
 		req->buf_index = kbuf->bid;
-		ret = u64_to_user_ptr(kbuf->addr);
+		return u64_to_user_ptr(kbuf->addr);
 	}
-
-	io_ring_submit_unlock(req->ctx, issue_flags);
-	return ret;
+	return NULL;
 }
 
 static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
@@ -3544,7 +3539,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 
 	if (unlikely(smp_load_acquire(&br->tail) == head)) {
 		io_ring_submit_unlock(req->ctx, issue_flags);
-		return ERR_PTR(-ENOBUFS);
+		return NULL;
 	}
 
 	head &= bl->mask;
@@ -3562,22 +3557,19 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 	req->buf_list = bl;
 	req->buf_index = buf->bid;
 
-	if (!(issue_flags & IO_URING_F_UNLOCKED))
-		return u64_to_user_ptr(buf->addr);
-
-	/*
-	 * If we came in unlocked, we have no choice but to
-	 * consume the buffer here. This does mean it'll be
-	 * pinned until the IO completes. But coming in
-	 * unlocked means we're in io-wq context, hence there
-	 * should be no further retry. For the locked case, the
-	 * caller must ensure to call the commit when the
-	 * transfer completes (or if we get -EAGAIN and must
-	 * poll or retry).
-	 */
-	req->buf_list = NULL;
-	bl->head++;
-	io_ring_submit_unlock(req->ctx, issue_flags);
+	if (issue_flags & IO_URING_F_UNLOCKED) {
+		/*
+		 * If we came in unlocked, we have no choice but to consume the
+		 * buffer here. This does mean it'll be pinned until the IO
+		 * completes. But coming in unlocked means we're in io-wq
+		 * context, hence there should be no further retry. For the
+		 * locked case, the caller must ensure to call the commit when
+		 * the transfer completes (or if we get -EAGAIN and must poll
+		 * or retry).
+		 */
+		req->buf_list = NULL;
+		bl->head++;
+	}
 	return u64_to_user_ptr(buf->addr);
 }
 
@@ -3586,20 +3578,19 @@ static void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
 				     unsigned int issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
+	void __user *ret = NULL;
 
 	io_ring_submit_lock(req->ctx, issue_flags);
-
 	bl = io_buffer_get_list(ctx, req->buf_index);
-	if (unlikely(!bl)) {
-		io_ring_submit_unlock(req->ctx, issue_flags);
-		return ERR_PTR(-ENOBUFS);
+	if (likely(bl)) {
+		if (bl->buf_nr_pages)
+			ret = io_ring_buffer_select(req, len, bl, issue_flags);
+		else
+			ret = io_provided_buffer_select(req, len, bl);
 	}
-
-	/* selection helpers drop the submit lock again, if needed */
-	if (bl->buf_nr_pages)
-		return io_ring_buffer_select(req, len, bl, issue_flags);
-	return io_provided_buffer_select(req, len, bl, issue_flags);
+	io_ring_submit_unlock(req->ctx, issue_flags);
+	return ret;
 }
 
 #ifdef CONFIG_COMPAT
@@ -3621,8 +3612,8 @@ static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
 
 	len = clen;
 	buf = io_buffer_select(req, &len, issue_flags);
-	if (IS_ERR(buf))
-		return PTR_ERR(buf);
+	if (!buf)
+		return -ENOBUFS;
 	req->rw.addr = (unsigned long) buf;
 	iov[0].iov_base = buf;
 	req->rw.len = iov[0].iov_len = (compat_size_t) len;
@@ -3644,8 +3635,8 @@ static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
 	if (len < 0)
 		return -EINVAL;
 	buf = io_buffer_select(req, &len, issue_flags);
-	if (IS_ERR(buf))
-		return PTR_ERR(buf);
+	if (!buf)
+		return -ENOBUFS;
 	req->rw.addr = (unsigned long) buf;
 	iov[0].iov_base = buf;
 	req->rw.len = iov[0].iov_len = len;
@@ -3702,8 +3693,8 @@ static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
 	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
 		if (io_do_buffer_select(req)) {
 			buf = io_buffer_select(req, &sqe_len, issue_flags);
-			if (IS_ERR(buf))
-				return ERR_CAST(buf);
+			if (!buf)
+				return ERR_PTR(-ENOBUFS);
 			req->rw.addr = (unsigned long) buf;
 			req->rw.len = sqe_len;
 		}
@@ -4642,8 +4633,8 @@ static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
 		size_t len = 1;
 
 		buf = io_buffer_select(req, &len, issue_flags);
-		if (IS_ERR(buf))
-			return PTR_ERR(buf);
+		if (!buf)
+			return -ENOBUFS;
 	}
 
 	__io_req_complete(req, issue_flags, 0, io_put_kbuf(req, issue_flags));
@@ -5779,8 +5770,8 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 		void __user *buf;
 
 		buf = io_buffer_select(req, &sr->len, issue_flags);
-		if (IS_ERR(buf))
-			return PTR_ERR(buf);
+		if (!buf)
+			return -ENOBUFS;
 		kmsg->fast_iov[0].iov_base = buf;
 		kmsg->fast_iov[0].iov_len = sr->len;
 		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
@@ -5843,8 +5834,8 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 		void __user *buf;
 
 		buf = io_buffer_select(req, &sr->len, issue_flags);
-		if (IS_ERR(buf))
-			return PTR_ERR(buf);
+		if (!buf)
+			return -ENOBUFS;
 		sr->buf = buf;
 	}
 