Commit f627aab1 authored by Paolo Bonzini

aio: introduce AioContext, move bottom halves there

Start introducing AioContext, which will let us remove globals from
aio.c/async.c, and introduce multiple I/O threads.

The bottom half functions now take an additional AioContext argument.
A bottom half is created with a specific AioContext that remains the
same throughout the lifetime.  qemu_bh_new is just a wrapper that
uses a global context.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 9958c351
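A minimal sketch of the resulting API, assuming a hypothetical caller (the example() function and my_cb callback below are illustrative only and not part of the commit):

#include "qemu-aio.h"    /* AioContext, aio_context_new(), aio_bh_new() */
#include "main-loop.h"   /* qemu_bh_new() wrapper for the global context */

static void my_cb(void *opaque)
{
    /* Runs the next time the owning context's bottom halves are polled. */
}

static void example(void)
{
    /* Old-style call, still available: the bottom half is implicitly
     * bound to the global AioContext created in qemu_init_main_loop(). */
    QEMUBH *global_bh = qemu_bh_new(my_cb, NULL);

    /* New-style call: the bottom half is bound to an explicit AioContext
     * for its whole lifetime. */
    AioContext *ctx = aio_context_new();
    QEMUBH *ctx_bh = aio_bh_new(ctx, my_cb, NULL);

    qemu_bh_schedule(global_bh);   /* serviced by the main loop */
    qemu_bh_schedule(ctx_bh);      /* serviced only when aio_bh_poll(ctx) runs */
}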
......@@ -18,8 +18,6 @@
#include "qemu-queue.h"
#include "qemu_socket.h"
typedef struct AioHandler AioHandler;
/* The list of registered AIO handlers */
static QLIST_HEAD(, AioHandler) aio_handlers;
......
......@@ -26,9 +26,6 @@
#include "qemu-aio.h"
#include "main-loop.h"
/* Anchor of the list of Bottom Halves belonging to the context */
static struct QEMUBH *first_bh;
/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */
......@@ -41,27 +38,26 @@ struct QEMUBH {
bool deleted;
};
QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
QEMUBH *bh;
bh = g_malloc0(sizeof(QEMUBH));
bh->cb = cb;
bh->opaque = opaque;
bh->next = first_bh;
first_bh = bh;
bh->next = ctx->first_bh;
ctx->first_bh = bh;
return bh;
}
int qemu_bh_poll(void)
int aio_bh_poll(AioContext *ctx)
{
QEMUBH *bh, **bhp, *next;
int ret;
static int nesting = 0;
nesting++;
ctx->walking_bh++;
ret = 0;
for (bh = first_bh; bh; bh = next) {
for (bh = ctx->first_bh; bh; bh = next) {
next = bh->next;
if (!bh->deleted && bh->scheduled) {
bh->scheduled = 0;
......@@ -72,11 +68,11 @@ int qemu_bh_poll(void)
}
}
nesting--;
ctx->walking_bh--;
/* remove deleted bhs */
if (!nesting) {
bhp = &first_bh;
if (!ctx->walking_bh) {
bhp = &ctx->first_bh;
while (*bhp) {
bh = *bhp;
if (bh->deleted) {
......@@ -120,11 +116,11 @@ void qemu_bh_delete(QEMUBH *bh)
bh->deleted = 1;
}
void qemu_bh_update_timeout(uint32_t *timeout)
void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout)
{
QEMUBH *bh;
for (bh = first_bh; bh; bh = bh->next) {
for (bh = ctx->first_bh; bh; bh = bh->next) {
if (!bh->deleted && bh->scheduled) {
if (bh->idle) {
/* idle bottom halves will be polled at least
......@@ -140,3 +136,7 @@ void qemu_bh_update_timeout(uint32_t *timeout)
}
}
AioContext *aio_context_new(void)
{
return g_new0(AioContext, 1);
}
......@@ -10,6 +10,7 @@
#include "ioport.h"
#include "irq.h"
#include "qemu-aio.h"
#include "qemu-file.h"
#include "vmstate.h"
#include "qemu-log.h"
......
......@@ -26,6 +26,7 @@
#include "qemu-common.h"
#include "qemu-char.h"
#include "qemu-queue.h"
#include "qemu-aio.h"
#include "main-loop.h"
#ifndef _WIN32
......
......@@ -9,6 +9,7 @@
*/
#include "qemu-common.h"
#include "qemu-aio.h"
#include "qemu-queue.h"
#include "block/raw-posix-aio.h"
#include <sys/eventfd.h>
......
......@@ -26,6 +26,7 @@
#include "qemu-timer.h"
#include "slirp/slirp.h"
#include "main-loop.h"
#include "qemu-aio.h"
#ifndef _WIN32
......@@ -199,6 +200,8 @@ static int qemu_signal_init(void)
}
#endif
static AioContext *qemu_aio_context;
int qemu_init_main_loop(void)
{
int ret;
......@@ -218,6 +221,7 @@ int qemu_init_main_loop(void)
return ret;
}
qemu_aio_context = aio_context_new();
return 0;
}
......@@ -481,7 +485,7 @@ int main_loop_wait(int nonblocking)
if (nonblocking) {
timeout = 0;
} else {
qemu_bh_update_timeout(&timeout);
aio_bh_update_timeout(qemu_aio_context, &timeout);
}
/* poll any events */
......@@ -510,3 +514,15 @@ int main_loop_wait(int nonblocking)
return ret;
}
/* Functions to operate on the main QEMU AioContext. */
QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
{
return aio_bh_new(qemu_aio_context, cb, opaque);
}
int qemu_bh_poll(void)
{
return aio_bh_poll(qemu_aio_context);
}
......@@ -25,6 +25,8 @@
#ifndef QEMU_MAIN_LOOP_H
#define QEMU_MAIN_LOOP_H 1
#include "qemu-aio.h"
#define SIG_IPI SIGUSR1
/**
......@@ -163,7 +165,6 @@ void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque);
typedef void IOReadHandler(void *opaque, const uint8_t *buf, int size);
typedef int IOCanReadHandler(void *opaque);
typedef void IOHandler(void *opaque);
/**
* qemu_set_fd_handler2: Register a file descriptor with the main loop
......@@ -244,56 +245,6 @@ int qemu_set_fd_handler(int fd,
IOHandler *fd_write,
void *opaque);
typedef struct QEMUBH QEMUBH;
typedef void QEMUBHFunc(void *opaque);
/**
* qemu_bh_new: Allocate a new bottom half structure.
*
* Bottom halves are lightweight callbacks whose invocation is guaranteed
* to be wait-free, thread-safe and signal-safe. The #QEMUBH structure
* is opaque and must be allocated prior to its use.
*/
QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
/**
* qemu_bh_schedule: Schedule a bottom half.
*
* Scheduling a bottom half interrupts the main loop and causes the
* execution of the callback that was passed to qemu_bh_new.
*
* Bottom halves that are scheduled from a bottom half handler are instantly
* invoked. This can create an infinite loop if a bottom half handler
* schedules itself.
*
* @bh: The bottom half to be scheduled.
*/
void qemu_bh_schedule(QEMUBH *bh);
/**
* qemu_bh_cancel: Cancel execution of a bottom half.
*
* Canceling execution of a bottom half undoes the effect of calls to
* qemu_bh_schedule without freeing its resources yet. While cancellation
* itself is also wait-free and thread-safe, it can of course race with the
* loop that executes bottom halves unless you are holding the iothread
* mutex. This makes it mostly useless if you are not holding the mutex.
*
* @bh: The bottom half to be canceled.
*/
void qemu_bh_cancel(QEMUBH *bh);
/**
*qemu_bh_delete: Cancel execution of a bottom half and free its resources.
*
* Deleting a bottom half frees the memory that was allocated for it by
* qemu_bh_new. It also implies canceling the bottom half if it was
* scheduled.
*
* @bh: The bottom half to be deleted.
*/
void qemu_bh_delete(QEMUBH *bh);
#ifdef CONFIG_POSIX
/**
* qemu_add_child_watch: Register a child process for reaping.
......@@ -349,8 +300,8 @@ void qemu_fd_register(int fd);
void qemu_iohandler_fill(int *pnfds, fd_set *readfds, fd_set *writefds, fd_set *xfds);
void qemu_iohandler_poll(fd_set *readfds, fd_set *writefds, fd_set *xfds, int rc);
QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
void qemu_bh_schedule_idle(QEMUBH *bh);
int qemu_bh_poll(void);
void qemu_bh_update_timeout(uint32_t *timeout);
#endif
......@@ -15,7 +15,6 @@
#define QEMU_AIO_H
#include "qemu-common.h"
#include "qemu-char.h"
#include "event_notifier.h"
typedef struct BlockDriverAIOCB BlockDriverAIOCB;
......@@ -39,9 +38,87 @@ void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
BlockDriverCompletionFunc *cb, void *opaque);
void qemu_aio_release(void *p);
typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef void IOHandler(void *opaque);
typedef struct AioContext {
/* Anchor of the list of Bottom Halves belonging to the context */
struct QEMUBH *first_bh;
/* A simple lock used to protect the first_bh list, and ensure that
* no callbacks are removed while we're walking and dispatching callbacks.
*/
int walking_bh;
} AioContext;
/* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
typedef int (AioFlushEventNotifierHandler)(EventNotifier *e);
/**
* aio_context_new: Allocate a new AioContext.
*
* AioContext provide a mini event-loop that can be waited on synchronously.
* They also provide bottom halves, a service to execute a piece of code
* as soon as possible.
*/
AioContext *aio_context_new(void);
/**
* aio_bh_new: Allocate a new bottom half structure.
*
* Bottom halves are lightweight callbacks whose invocation is guaranteed
* to be wait-free, thread-safe and signal-safe. The #QEMUBH structure
* is opaque and must be allocated prior to its use.
*/
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
/**
* aio_bh_poll: Poll bottom halves for an AioContext.
*
* These are internal functions used by the QEMU main loop.
*/
int aio_bh_poll(AioContext *ctx);
void aio_bh_update_timeout(AioContext *ctx, uint32_t *timeout);
/**
* qemu_bh_schedule: Schedule a bottom half.
*
* Scheduling a bottom half interrupts the main loop and causes the
* execution of the callback that was passed to qemu_bh_new.
*
* Bottom halves that are scheduled from a bottom half handler are instantly
* invoked. This can create an infinite loop if a bottom half handler
* schedules itself.
*
* @bh: The bottom half to be scheduled.
*/
void qemu_bh_schedule(QEMUBH *bh);
/**
* qemu_bh_cancel: Cancel execution of a bottom half.
*
* Canceling execution of a bottom half undoes the effect of calls to
* qemu_bh_schedule without freeing its resources yet. While cancellation
* itself is also wait-free and thread-safe, it can of course race with the
* loop that executes bottom halves unless you are holding the iothread
* mutex. This makes it mostly useless if you are not holding the mutex.
*
* @bh: The bottom half to be canceled.
*/
void qemu_bh_cancel(QEMUBH *bh);
/**
*qemu_bh_delete: Cancel execution of a bottom half and free its resources.
*
* Deleting a bottom half frees the memory that was allocated for it by
* qemu_bh_new. It also implies canceling the bottom half if it was
* scheduled.
*
* @bh: The bottom half to be deleted.
*/
void qemu_bh_delete(QEMUBH *bh);
/* Flush any pending AIO operation. This function will block until all
* outstanding AIO operations have been completed or cancelled. */
void qemu_aio_flush(void);
......
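For reference, a minimal sketch of how a main-loop-style caller is expected to drive these per-context entry points; the poll_context_once() helper below is hypothetical and simply mirrors what main_loop_wait() and the qemu_bh_poll() wrapper in main-loop.c do with the global context:

#include <stdint.h>
#include <stdbool.h>
#include "qemu-aio.h"

static void poll_context_once(AioContext *ctx, bool nonblocking)
{
    uint32_t timeout = nonblocking ? 0 : UINT32_MAX;

    if (!nonblocking) {
        /* Shrink the poll timeout if a bottom half is already scheduled,
         * so it is serviced promptly (idle BHs allow up to ~10 ms). */
        aio_bh_update_timeout(ctx, &timeout);
    }

    /* ... wait for file descriptor events for at most `timeout` ms ... */

    /* Invoke every scheduled, non-deleted bottom half of this context. */
    aio_bh_poll(ctx);
}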
......@@ -5,6 +5,7 @@
#include "qemu-queue.h"
#include "qemu-option.h"
#include "qemu-config.h"
#include "qemu-aio.h"
#include "qobject.h"
#include "qstring.h"
#include "main-loop.h"
......
......@@ -14,6 +14,7 @@
typedef struct QEMUTimer QEMUTimer;
typedef struct QEMUFile QEMUFile;
typedef struct QEMUBH QEMUBH;
typedef struct DeviceState DeviceState;
struct Monitor;
......
......@@ -26,7 +26,7 @@
#include "qemu-coroutine.h"
#include "qemu-coroutine-int.h"
#include "qemu-queue.h"
#include "main-loop.h"
#include "qemu-aio.h"
#include "trace.h"
static QTAILQ_HEAD(, Coroutine) unlock_bh_queue =
......