Commit c7f2525a authored by Linus Torvalds, committed by Greg Kroah-Hartman

pin iocb through aio.

commit b53119f13a04879c3bf502828d99d13726639ead upstream.

aio_poll() is not the only case that needs file pinned; worse, while
aio_read()/aio_write() can live without pinning iocb itself, the
proof is rather brittle and can easily break on later changes.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent d6b2615f
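To make the ownership rule concrete before reading the diff, here is a minimal userspace C sketch (not kernel code; the names request_alloc, request_put and completion are invented for illustration) of the lifetime scheme this patch adopts: every request starts with a refcount of 2, one reference owned by the asynchronous completion path and one by the submitting code, and the pinned file is released only after both have been dropped.

/* Minimal sketch of the "refcount starts at 2" lifetime rule; not kernel code. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	atomic_int refcnt;
	FILE *filp;                     /* stands in for the pinned struct file */
};

static struct request *request_alloc(FILE *filp)
{
	struct request *req = malloc(sizeof(*req));
	if (!req)
		return NULL;
	/* one reference for the async completion, one for the submitter */
	atomic_init(&req->refcnt, 2);
	req->filp = filp;
	return req;
}

static void request_destroy(struct request *req)
{
	if (req->filp)
		fclose(req->filp);      /* analogous to fput() in iocb_destroy() */
	free(req);
}

static void request_put(struct request *req)
{
	/* release everything only when the last reference is dropped */
	if (atomic_fetch_sub(&req->refcnt, 1) == 1)
		request_destroy(req);
}

static void completion(struct request *req)
{
	printf("async completion drops its reference\n");
	request_put(req);
}

int main(void)
{
	struct request *req = request_alloc(fopen("/dev/null", "r"));
	if (!req)
		return 1;
	completion(req);                /* the async side may finish first...      */
	request_put(req);               /* ...but the submitter's reference keeps
	                                   the request and its file pinned until here */
	return 0;
}

The point of initializing the count to 2 for every request, rather than taking an extra reference only in aio_poll(), is that all submission paths now keep the iocb and its file pinned until the submitter itself is done, instead of relying on a brittle argument about when the last reference can go away.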
@@ -1016,6 +1016,9 @@ static bool get_reqs_available(struct kioctx *ctx)
 /* aio_get_req
  *	Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
+ *
+ * The refcount is initialized to 2 - one for the async op completion,
+ * one for the synchronous code that does this.
  */
 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
@@ -1028,7 +1031,7 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 	percpu_ref_get(&ctx->reqs);
 	req->ki_ctx = ctx;
 	INIT_LIST_HEAD(&req->ki_list);
-	refcount_set(&req->ki_refcnt, 0);
+	refcount_set(&req->ki_refcnt, 2);
 	req->ki_eventfd = NULL;
 	return req;
 }
@@ -1061,15 +1064,18 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 	return ret;
 }
 
+static inline void iocb_destroy(struct aio_kiocb *iocb)
+{
+	if (iocb->ki_filp)
+		fput(iocb->ki_filp);
+	percpu_ref_put(&iocb->ki_ctx->reqs);
+	kmem_cache_free(kiocb_cachep, iocb);
+}
+
 static inline void iocb_put(struct aio_kiocb *iocb)
 {
-	if (refcount_read(&iocb->ki_refcnt) == 0 ||
-	    refcount_dec_and_test(&iocb->ki_refcnt)) {
-		if (iocb->ki_filp)
-			fput(iocb->ki_filp);
-		percpu_ref_put(&iocb->ki_ctx->reqs);
-		kmem_cache_free(kiocb_cachep, iocb);
-	}
+	if (refcount_dec_and_test(&iocb->ki_refcnt))
+		iocb_destroy(iocb);
 }
 
 static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
@@ -1743,9 +1749,6 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 	INIT_LIST_HEAD(&req->wait.entry);
 	init_waitqueue_func_entry(&req->wait, aio_poll_wake);
 
-	/* one for removal from waitqueue, one for this function */
-	refcount_set(&aiocb->ki_refcnt, 2);
-
 	mask = vfs_poll(req->file, &apt.pt) & req->events;
 	if (unlikely(!req->head)) {
 		/* we did not manage to set up a waitqueue, done */
@@ -1776,7 +1779,6 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 
 	if (mask)
 		aio_poll_complete(aiocb, mask);
-	iocb_put(aiocb);
 	return 0;
 }
 
@@ -1867,18 +1869,21 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
 		break;
 	}
 
+	/* Done with the synchronous reference */
+	iocb_put(req);
+
 	/*
 	 * If ret is 0, we'd either done aio_complete() ourselves or have
	 * arranged for that to be done asynchronously.  Anything non-zero
	 * means that we need to destroy req ourselves.
	 */
-	if (ret)
-		goto out_put_req;
-	return 0;
+	if (!ret)
+		return 0;
+
 out_put_req:
 	if (req->ki_eventfd)
 		eventfd_ctx_put(req->ki_eventfd);
-	iocb_put(req);
+	iocb_destroy(req);
 out_put_reqs_available:
 	put_reqs_available(ctx, 1);
 	return ret;