Commit a153bf52 authored by Paolo Bonzini, committed by Stefan Hajnoczi

aio-posix: partially inline aio_dispatch into aio_poll

This patch prepares for the removal of unnecessary lockcnt inc/dec pairs.
Extract the dispatching loop for file descriptor handlers into a new
function aio_dispatch_handlers, and then inline aio_dispatch into
aio_poll.

aio_dispatch can now become void.
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
Message-id: 20170213135235.12274-17-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Parent b9e413dd
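The hunks below touch four places: the aio_dispatch declaration in include/block/aio.h, the POSIX implementation in aio-posix.c, the Win32 implementation in aio-win32.c, and the GSource dispatch callback in async.c. As a reading aid, here is a standalone C sketch of the control flow that results from the patch; every mock_* function and the plain counter that stands in for the list_lock lockcnt are illustrative stubs, not QEMU API, and the authoritative code is the diff itself.

/*
 * Schematic mock of the control flow after this patch.  Everything here
 * (the mock_* functions and the plain counter standing in for the
 * list_lock lockcnt) is an illustrative stub, not QEMU API.
 */
#include <stdbool.h>
#include <stdio.h>

static int list_lockcnt;                  /* stand-in for ctx->list_lock */

static bool mock_bh_poll(void)            { puts("bottom halves"); return true; }
static bool mock_dispatch_handlers(void)  { puts("fd handlers");   return true; }
static bool mock_run_timers(void)         { puts("timers");        return true; }

/* After the patch aio_dispatch is void: only the GSource dispatch callback
 * calls it, and that caller never used the return value. */
static void mock_aio_dispatch(void)
{
    mock_bh_poll();

    list_lockcnt++;                       /* qemu_lockcnt_inc(&ctx->list_lock) */
    mock_dispatch_handlers();
    list_lockcnt--;                       /* qemu_lockcnt_dec(&ctx->list_lock) */

    mock_run_timers();
}

/* aio_poll no longer goes through aio_dispatch; it inlines the same three
 * steps, skips the fd-handler walk when poll() reported nothing ready, and
 * still accumulates a progress value for its caller. */
static bool mock_aio_poll(int ret)
{
    bool progress = false;

    progress |= mock_bh_poll();

    if (ret > 0) {
        list_lockcnt++;
        progress |= mock_dispatch_handlers();
        list_lockcnt--;
    }

    progress |= mock_run_timers();
    return progress;
}

int main(void)
{
    mock_aio_dispatch();                  /* GSource path */
    printf("aio_poll made progress: %d\n", mock_aio_poll(1));
    return 0;
}

The value of the inlining is visible in mock_aio_poll: the fd-handler walk (and its lockcnt pair) can be skipped entirely when poll() reported no ready descriptors, while the GSource path keeps the full three-step aio_dispatch, which no longer needs to report progress.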
@@ -310,12 +310,8 @@ bool aio_pending(AioContext *ctx);
 /* Dispatch any pending callbacks from the GSource attached to the AioContext.
  *
  * This is used internally in the implementation of the GSource.
- *
- * @dispatch_fds: true to process fds, false to skip them
- *                (can be used as an optimization by callers that know there
- *                are no fds ready)
  */
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds);
+void aio_dispatch(AioContext *ctx);
 
 /* Progress in completing AIO work to occur. This can issue new pending
  * aio as a result of executing I/O completion or bh callbacks.

@@ -386,12 +386,6 @@ static bool aio_dispatch_handlers(AioContext *ctx)
     AioHandler *node, *tmp;
     bool progress = false;
 
-    /*
-     * We have to walk very carefully in case aio_set_fd_handler is
-     * called while we're walking.
-     */
-    qemu_lockcnt_inc(&ctx->list_lock);
-
     QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
         int revents;
@@ -426,33 +420,18 @@ static bool aio_dispatch_handlers(AioContext *ctx)
         }
     }
 
-    qemu_lockcnt_dec(&ctx->list_lock);
     return progress;
 }
 
-/*
- * Note that dispatch_fds == false has the side-effect of post-poning the
- * freeing of deleted handlers.
- */
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
+void aio_dispatch(AioContext *ctx)
 {
-    bool progress;
-
-    /*
-     * If there are callbacks left that have been queued, we need to call them.
-     * Do not call select in this case, because it is possible that the caller
-     * does not need a complete flush (as is the case for aio_poll loops).
-     */
-    progress = aio_bh_poll(ctx);
-
-    if (dispatch_fds) {
-        progress |= aio_dispatch_handlers(ctx);
-    }
-
-    /* Run our timers */
-    progress |= timerlistgroup_run_timers(&ctx->tlg);
-
-    return progress;
+    aio_bh_poll(ctx);
+
+    qemu_lockcnt_inc(&ctx->list_lock);
+    aio_dispatch_handlers(ctx);
+    qemu_lockcnt_dec(&ctx->list_lock);
+
+    timerlistgroup_run_timers(&ctx->tlg);
 }
 
 /* These thread-local variables are used only in a small part of aio_poll
@@ -702,11 +681,16 @@ bool aio_poll(AioContext *ctx, bool blocking)
     npfd = 0;
     qemu_lockcnt_dec(&ctx->list_lock);
 
-    /* Run dispatch even if there were no readable fds to run timers */
-    if (aio_dispatch(ctx, ret > 0)) {
-        progress = true;
+    progress |= aio_bh_poll(ctx);
+
+    if (ret > 0) {
+        qemu_lockcnt_inc(&ctx->list_lock);
+        progress |= aio_dispatch_handlers(ctx);
+        qemu_lockcnt_dec(&ctx->list_lock);
     }
 
+    progress |= timerlistgroup_run_timers(&ctx->tlg);
+
     return progress;
 }

@@ -309,16 +309,11 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
     return progress;
 }
 
-bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
+void aio_dispatch(AioContext *ctx)
 {
-    bool progress;
-
-    progress = aio_bh_poll(ctx);
-    if (dispatch_fds) {
-        progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
-    }
-    progress |= timerlistgroup_run_timers(&ctx->tlg);
-    return progress;
+    aio_bh_poll(ctx);
+    aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
+    timerlistgroup_run_timers(&ctx->tlg);
 }
 
 bool aio_poll(AioContext *ctx, bool blocking)

@@ -258,7 +258,7 @@ aio_ctx_dispatch(GSource *source,
     AioContext *ctx = (AioContext *) source;
 
     assert(callback == NULL);
-    aio_dispatch(ctx, true);
+    aio_dispatch(ctx);
     return true;
 }