/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"

struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

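/* Return the live handler registered for @fd, skipping nodes that have
 * already been marked as deleted.
 */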
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            return node;
        }
    }

    return NULL;
}

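/* Register @io_read/@io_write callbacks for @fd, update an existing
 * registration, or remove it entirely when both callbacks are NULL.
 * The pollfd is also (de)registered with the context's GSource so the
 * glib main loop polls it.
 */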
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->opaque = opaque;
        node->is_external = is_external;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
    }

    aio_notify(ctx);
}

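/* Convenience wrapper that registers an EventNotifier's file descriptor
 * as a read-only handler, with the notifier itself as the opaque value.
 */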
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                       is_external, (IOHandler *)io_read, NULL, notifier);
}
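
/* Usage sketch (illustrative only, not code from this file; my_ctx,
 * my_fd and on_readable are hypothetical names).  A caller registers a
 * read handler for a file descriptor, and later removes it by passing
 * NULL callbacks:
 *
 *     static void on_readable(void *opaque)
 *     {
 *         int fd = *(int *)opaque;
 *         ... consume data from fd ...
 *     }
 *
 *     aio_set_fd_handler(my_ctx, my_fd, false, on_readable, NULL, &my_fd);
 *     ...
 *     aio_set_fd_handler(my_ctx, my_fd, false, NULL, NULL, NULL);
 */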

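/* Nothing to do before polling on POSIX: the fds were already handed to
 * glib via g_source_add_poll().  The hook exists for the benefit of the
 * Windows implementation, which does need a prepare step.
 */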
bool aio_prepare(AioContext *ctx)
{
    return false;
}

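/* Return true if a subsequent aio_dispatch() would invoke at least one
 * read or write callback, based on the revents filled in by the last
 * poll.
 */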
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
            return true;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
            return true;
        }
    }

    return false;
}

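/* Run queued bottom halves, invoke the callbacks of handlers whose
 * revents were filled in by the last poll, and run expired timers.
 * Returns true if any callback made progress.
 */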
bool aio_dispatch(AioContext *ctx)
{
    AioHandler *node;
    bool progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for aio_poll loops).
     */
    if (aio_bh_poll(ctx)) {
        progress = true;
    }

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents;

        ctx->walking_handlers++;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            node->io_read) {
            node->io_read(node->opaque);

            /* aio_notify() does not count as progress */
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    /* Run our timers */
    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}

/* These thread-local variables are used only in a small part of aio_poll
 * around the call to the poll() system call.  In particular they are not
 * used while aio_poll is performing callbacks, which makes it much easier
 * to think about reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll().  And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext.  Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;

static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}

static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}

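/* Poll the registered file descriptors once and dispatch ready handlers,
 * bottom halves and timers.  When @blocking is true, sleep until at
 * least one event source needs attention.  Returns true if progress was
 * made.
 */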
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    int i, ret;
    bool progress;
    int64_t timeout;

    aio_context_acquire(ctx);
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    ctx->walking_handlers++;

    assert(npfd == 0);

    /* fill pollfds */
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->pfd.events) {
            add_pollfd(node);
        }
    }

    timeout = blocking ? aio_compute_timeout(ctx) : 0;

    /* wait until next event */
    if (timeout) {
        /* Drop the context lock so other threads can run while we poll */
        aio_context_release(ctx);
    }
    ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout);
    if (blocking) {
        /* Pairs with the atomic_add() above now that poll() has returned */
        atomic_sub(&ctx->notify_me, 2);
    }
    if (timeout) {
        aio_context_acquire(ctx);
    }

    aio_notify_accept(ctx);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        for (i = 0; i < npfd; i++) {
            nodes[i]->pfd.revents = pollfds[i].revents;
        }
    }

    npfd = 0;
    ctx->walking_handlers--;

    /* Run dispatch even if there were no readable fds to run timers */
    if (aio_dispatch(ctx)) {
        progress = true;
    }

    aio_context_release(ctx);

    return progress;
}
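
/* Illustrative sketch of a caller (hypothetical names, not part of this
 * file): a dedicated event-loop thread can simply drive the context in
 * a loop, blocking in aio_poll() until some handler makes progress:
 *
 *     while (!atomic_read(&stopping)) {
 *         aio_poll(ctx, true);
 *     }
 */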