/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
P
Paolo Bonzini 已提交
34
    AioContext *ctx;
K
Kevin Wolf 已提交
35 36 37
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
38 39 40
    bool scheduled;
    bool idle;
    bool deleted;
K
Kevin Wolf 已提交
41 42
};

43
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
K
Kevin Wolf 已提交
44 45
{
    QEMUBH *bh;
46
    bh = g_malloc0(sizeof(QEMUBH));
P
Paolo Bonzini 已提交
47
    bh->ctx = ctx;
K
Kevin Wolf 已提交
48 49
    bh->cb = cb;
    bh->opaque = opaque;
50
    qemu_mutex_lock(&ctx->bh_lock);
51
    bh->next = ctx->first_bh;
52 53
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
54
    ctx->first_bh = bh;
55
    qemu_mutex_unlock(&ctx->bh_lock);
K
Kevin Wolf 已提交
56 57 58
    return bh;
}

59
/* Multiple occurrences of aio_bh_poll cannot be called concurrently */
60
int aio_bh_poll(AioContext *ctx)
K
Kevin Wolf 已提交
61
{
62
    QEMUBH *bh, **bhp, *next;
K
Kevin Wolf 已提交
63
    int ret;
64

65
    ctx->walking_bh++;
K
Kevin Wolf 已提交
66 67

    ret = 0;
68
    for (bh = ctx->first_bh; bh; bh = next) {
69 70
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
71
        next = bh->next;
K
Kevin Wolf 已提交
72 73
        if (!bh->deleted && bh->scheduled) {
            bh->scheduled = 0;
74 75 76 77
            /* Paired with write barrier in bh schedule to ensure reading for
             * idle & callbacks coming after bh's scheduling.
             */
            smp_rmb();
K
Kevin Wolf 已提交
78 79 80 81 82 83 84
            if (!bh->idle)
                ret = 1;
            bh->idle = 0;
            bh->cb(bh->opaque);
        }
    }

85
    ctx->walking_bh--;
86

K
Kevin Wolf 已提交
87
    /* remove deleted bhs */
88
    if (!ctx->walking_bh) {
89
        qemu_mutex_lock(&ctx->bh_lock);
90
        bhp = &ctx->first_bh;
91 92 93 94 95 96 97 98 99
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
100
        qemu_mutex_unlock(&ctx->bh_lock);
K
Kevin Wolf 已提交
101 102 103 104 105 106 107 108 109 110
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    if (bh->scheduled)
        return;
    bh->idle = 1;
111 112 113 114 115
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    smp_wmb();
    bh->scheduled = 1;
K
Kevin Wolf 已提交
116 117 118 119
}

void qemu_bh_schedule(QEMUBH *bh)
{
120 121
    AioContext *ctx;

K
Kevin Wolf 已提交
122 123
    if (bh->scheduled)
        return;
124
    ctx = bh->ctx;
K
Kevin Wolf 已提交
125
    bh->idle = 0;
126 127 128 129 130
    /* Make sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
131
     */
132
    smp_mb();
133
    bh->scheduled = 1;
134
    aio_notify(ctx);
K
Kevin Wolf 已提交
135 136
}

137 138 139

/* This func is async.
 */
K
Kevin Wolf 已提交
140 141 142 143 144
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

145 146 147
/* This func is async.The bottom half will do the delete action at the finial
 * end.
 */
K
Kevin Wolf 已提交
148 149 150 151 152 153
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}

P
Paolo Bonzini 已提交
154 155
static gboolean
aio_ctx_prepare(GSource *source, gint    *timeout)
K
Kevin Wolf 已提交
156
{
P
Paolo Bonzini 已提交
157
    AioContext *ctx = (AioContext *) source;
K
Kevin Wolf 已提交
158
    QEMUBH *bh;
159
    int deadline;
K
Kevin Wolf 已提交
160

161 162
    /* We assume there is no timeout already supplied */
    *timeout = -1;
163
    for (bh = ctx->first_bh; bh; bh = bh->next) {
K
Kevin Wolf 已提交
164 165 166 167
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
P
Paolo Bonzini 已提交
168
                *timeout = 10;
K
Kevin Wolf 已提交
169 170 171 172
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                *timeout = 0;
173
                return true;
K
Kevin Wolf 已提交
174 175 176
            }
        }
    }
P
Paolo Bonzini 已提交
177

178 179 180 181 182 183 184 185
    deadline = qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg));
    if (deadline == 0) {
        *timeout = 0;
        return true;
    } else {
        *timeout = qemu_soonest_timeout(*timeout, deadline);
    }

186
    return false;
P
Paolo Bonzini 已提交
187 188 189 190 191 192 193 194 195 196 197 198 199
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            return true;
	}
    }
200
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
P
Paolo Bonzini 已提交
201 202 203 204 205 206 207 208 209 210 211 212 213 214
}

static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_poll(ctx, false);
    return true;
}

P
Paolo Bonzini 已提交
215 216 217 218 219
static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;

220
    thread_pool_free(ctx->thread_pool);
S
Stefan Hajnoczi 已提交
221
    aio_set_event_notifier(ctx, &ctx->notifier, NULL);
P
Paolo Bonzini 已提交
222
    event_notifier_cleanup(&ctx->notifier);
223
    rfifolock_destroy(&ctx->lock);
224
    qemu_mutex_destroy(&ctx->bh_lock);
225
    g_array_free(ctx->pollfds, TRUE);
226
    timerlistgroup_deinit(&ctx->tlg);
P
Paolo Bonzini 已提交
227 228
}

P
Paolo Bonzini 已提交
229 230 231 232
static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
P
Paolo Bonzini 已提交
233
    aio_ctx_finalize
P
Paolo Bonzini 已提交
234 235 236 237 238 239 240
};

GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}
241

242 243 244 245 246 247 248 249
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

P
Paolo Bonzini 已提交
250 251 252 253 254
void aio_notify(AioContext *ctx)
{
    event_notifier_set(&ctx->notifier);
}

255 256 257 258 259
static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}

260 261 262 263 264 265
static void aio_rfifolock_cb(void *opaque)
{
    /* Kick owner thread in case they are blocked in aio_poll() */
    aio_notify(opaque);
}

266 267
AioContext *aio_context_new(void)
{
P
Paolo Bonzini 已提交
268 269
    AioContext *ctx;
    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
270
    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
271
    ctx->thread_pool = NULL;
272
    qemu_mutex_init(&ctx->bh_lock);
273
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
P
Paolo Bonzini 已提交
274 275 276
    event_notifier_init(&ctx->notifier, false);
    aio_set_event_notifier(ctx, &ctx->notifier, 
                           (EventNotifierHandler *)
S
Stefan Hajnoczi 已提交
277
                           event_notifier_test_and_clear);
278
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
P
Paolo Bonzini 已提交
279 280

    return ctx;
P
Paolo Bonzini 已提交
281 282 283 284 285 286 287 288 289 290
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
291
}
292 293 294 295 296 297 298 299 300 301

void aio_context_acquire(AioContext *ctx)
{
    rfifolock_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    rfifolock_unlock(&ctx->lock);
}