/*
 * QEMU posix-aio emulation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <sys/ioctl.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "osdep.h"
#include "qemu-common.h"

#include "posix-aio-compat.h"

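/*
 * Thread pool state.  A single mutex/condvar pair protects request_list
 * and the thread counters below; worker threads are created with the
 * (detached) attributes in 'attr'.
 */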
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static pthread_t thread_id;
static pthread_attr_t attr;
static int max_threads = 64;
static int cur_threads = 0;
static int idle_threads = 0;
static TAILQ_HEAD(, qemu_paiocb) request_list;

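/*
 * preadv/pwritev support is detected at build time; preadv_present is
 * additionally cleared at runtime if the calls turn out not to be usable.
 */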
#ifdef HAVE_PREADV
static int preadv_present = 1;
#else
static int preadv_present = 0;
#endif

static void die2(int err, const char *what)
{
    fprintf(stderr, "%s failed: %s\n", what, strerror(err));
    abort();
}

static void die(const char *what)
{
    die2(errno, what);
}

static void mutex_lock(pthread_mutex_t *mutex)
{
    int ret = pthread_mutex_lock(mutex);
    if (ret) die2(ret, "pthread_mutex_lock");
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
    int ret = pthread_mutex_unlock(mutex);
    if (ret) die2(ret, "pthread_mutex_unlock");
}

static int cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
                           struct timespec *ts)
{
    int ret = pthread_cond_timedwait(cond, mutex, ts);
    if (ret && ret != ETIMEDOUT) die2(ret, "pthread_cond_timedwait");
    return ret;
}

static void cond_signal(pthread_cond_t *cond)
{
    int ret = pthread_cond_signal(cond);
    if (ret) die2(ret, "pthread_cond_signal");
}

static void thread_create(pthread_t *thread, pthread_attr_t *attr,
                          void *(*start_routine)(void*), void *arg)
{
    int ret = pthread_create(thread, attr, start_routine, arg);
    if (ret) die2(ret, "pthread_create");
}

static size_t handle_aiocb_ioctl(struct qemu_paiocb *aiocb)
{
    int ret;

    ret = ioctl(aiocb->aio_fildes, aiocb->aio_ioctl_cmd, aiocb->aio_ioctl_buf);
    if (ret == -1)
        return -errno;

    /*
     * This looks weird, but the aio code only considers a request
     * successful if it has written the full number of bytes.
     *
     * Now we overload aio_nbytes as aio_ioctl_cmd for the ioctl command,
     * so in fact we return the ioctl command here to make posix_aio_read()
     * happy.
     */
    return aiocb->aio_nbytes;
}

#ifdef HAVE_PREADV

static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return preadv(fd, iov, nr_iov, offset);
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return pwritev(fd, iov, nr_iov, offset);
}

#else

static ssize_t
qemu_preadv(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

static ssize_t
qemu_pwritev(int fd, const struct iovec *iov, int nr_iov, off_t offset)
{
    return -ENOSYS;
}

#endif

/*
 * Check if we need to copy the data in the aiocb into a new
 * properly aligned buffer.
 */
static int aiocb_needs_copy(struct qemu_paiocb *aiocb)
{
    if (aiocb->aio_flags & QEMU_AIO_SECTOR_ALIGNED) {
        int i;

        for (i = 0; i < aiocb->aio_niov; i++)
            if ((uintptr_t) aiocb->aio_iov[i].iov_base % 512)
                return 1;
    }

    return 0;
}

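/* Issue the whole iovec as a single preadv/pwritev call.  Returns the
 * number of bytes transferred, or -errno on failure. */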
static size_t handle_aiocb_rw_vector(struct qemu_paiocb *aiocb)
{
    size_t offset = 0;
    ssize_t len;

    do {
        if (aiocb->aio_type == QEMU_PAIO_WRITE)
            len = qemu_pwritev(aiocb->aio_fildes,
                               aiocb->aio_iov,
                               aiocb->aio_niov,
                               aiocb->aio_offset + offset);
        else
            len = qemu_preadv(aiocb->aio_fildes,
                              aiocb->aio_iov,
                              aiocb->aio_niov,
                              aiocb->aio_offset + offset);
    } while (len == -1 && errno == EINTR);

    if (len == -1)
        return -errno;
    return len;
}

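/* Transfer a single linear buffer with plain pread/pwrite, retrying on
 * EINTR and looping on short transfers.  Returns the number of bytes
 * transferred, or -errno on failure. */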
static size_t handle_aiocb_rw_linear(struct qemu_paiocb *aiocb, char *buf)
{
    size_t offset = 0;
    size_t len;

    while (offset < aiocb->aio_nbytes) {
        if (aiocb->aio_type == QEMU_PAIO_WRITE)
            len = pwrite(aiocb->aio_fildes,
                         (const char *)buf + offset,
                         aiocb->aio_nbytes - offset,
                         aiocb->aio_offset + offset);
        else
            len = pread(aiocb->aio_fildes,
                        buf + offset,
                        aiocb->aio_nbytes - offset,
                        aiocb->aio_offset + offset);

        if (len == -1 && errno == EINTR)
            continue;
        else if (len == -1) {
            offset = -errno;
            break;
        } else if (len == 0)
            break;

        offset += len;
    }

    return offset;
}

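/* Read/write dispatcher: use the caller's buffers directly when they are
 * properly aligned, otherwise bounce through a single 512-byte aligned
 * buffer. */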
static size_t handle_aiocb_rw(struct qemu_paiocb *aiocb)
{
    size_t nbytes;
    char *buf;

    if (!aiocb_needs_copy(aiocb)) {
        /*
         * If there is just a single buffer, and it is properly aligned
         * we can just use plain pread/pwrite without any problems.
         */
        if (aiocb->aio_niov == 1)
            return handle_aiocb_rw_linear(aiocb, aiocb->aio_iov->iov_base);

        /*
         * We have more than one iovec, and all are properly aligned.
         *
         * Try preadv/pwritev first and fall back to linearizing the
         * buffer if it's not supported.
         */
        if (preadv_present) {
            nbytes = handle_aiocb_rw_vector(aiocb);
            if (nbytes == aiocb->aio_nbytes)
                return nbytes;
            if ((ssize_t)nbytes < 0 && nbytes != -ENOSYS)
                return nbytes;
            preadv_present = 0;
        }

        /*
         * XXX(hch): short read/write.  no easy way to handle the remainder
         * using these interfaces.  For now retry using plain
         * pread/pwrite?
         */
    }

    /*
     * Ok, we have to do it the hard way, copy all segments into
     * a single aligned buffer.
     */
    buf = qemu_memalign(512, aiocb->aio_nbytes);
    if (aiocb->aio_type == QEMU_PAIO_WRITE) {
        char *p = buf;
        int i;

        for (i = 0; i < aiocb->aio_niov; ++i) {
            memcpy(p, aiocb->aio_iov[i].iov_base, aiocb->aio_iov[i].iov_len);
            p += aiocb->aio_iov[i].iov_len;
        }
    }

    nbytes = handle_aiocb_rw_linear(aiocb, buf);
    if (aiocb->aio_type != QEMU_PAIO_WRITE) {
        char *p = buf;
        size_t count = aiocb->aio_nbytes, copy;
        int i;

        for (i = 0; i < aiocb->aio_niov && count; ++i) {
            copy = count;
            if (copy > aiocb->aio_iov[i].iov_len)
                copy = aiocb->aio_iov[i].iov_len;
            memcpy(aiocb->aio_iov[i].iov_base, p, copy);
            p     += copy;
            count -= copy;
        }
    }
    qemu_vfree(buf);

    return nbytes;
}

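/*
 * Worker thread: take requests off request_list, execute them, store the
 * result and signal completion by sending ev_signo to the process.
 * A thread that stays idle for about ten seconds exits.
 */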
static void *aio_thread(void *unused)
{
    pid_t pid;
    sigset_t set;

    pid = getpid();

    /* block all signals */
    if (sigfillset(&set)) die("sigfillset");
    if (sigprocmask(SIG_BLOCK, &set, NULL)) die("sigprocmask");

    while (1) {
        struct qemu_paiocb *aiocb;
        size_t ret = 0;
        qemu_timeval tv;
        struct timespec ts;

        qemu_gettimeofday(&tv);
        ts.tv_sec = tv.tv_sec + 10;
        ts.tv_nsec = 0;

        mutex_lock(&lock);

        while (TAILQ_EMPTY(&request_list) && ret != ETIMEDOUT) {
            ret = cond_timedwait(&cond, &lock, &ts);
        }

        if (TAILQ_EMPTY(&request_list))
            break;

        aiocb = TAILQ_FIRST(&request_list);
        TAILQ_REMOVE(&request_list, aiocb, node);
        aiocb->active = 1;
        idle_threads--;
        mutex_unlock(&lock);

        switch (aiocb->aio_type) {
        case QEMU_PAIO_READ:
        case QEMU_PAIO_WRITE:
            ret = handle_aiocb_rw(aiocb);
            break;
        case QEMU_PAIO_IOCTL:
            ret = handle_aiocb_ioctl(aiocb);
            break;
        default:
            fprintf(stderr, "invalid aio request (0x%x)\n", aiocb->aio_type);
            ret = -EINVAL;
            break;
        }

        mutex_lock(&lock);
        aiocb->ret = ret;
        idle_threads++;
        mutex_unlock(&lock);

        if (kill(pid, aiocb->ev_signo)) die("kill failed");
    }

    idle_threads--;
    cur_threads--;
    mutex_unlock(&lock);

    return NULL;
}

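/* Start one more worker thread.  Called with 'lock' held. */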
static void spawn_thread(void)
{
    cur_threads++;
    idle_threads++;
    thread_create(&thread_id, &attr, aio_thread, NULL);
}

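/* One-time initialisation.  Worker threads are created detached, so they
 * are never joined. */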
int qemu_paio_init(struct qemu_paioinit *aioinit)
{
    int ret;

    ret = pthread_attr_init(&attr);
    if (ret) die2(ret, "pthread_attr_init");

    ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    if (ret) die2(ret, "pthread_attr_setdetachstate");

    TAILQ_INIT(&request_list);

    return 0;
}

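/* Queue a request for the worker threads, spawning a new thread if none
 * are idle, and wake up one worker. */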
static int qemu_paio_submit(struct qemu_paiocb *aiocb, int type)
{
    aiocb->aio_type = type;
    aiocb->ret = -EINPROGRESS;
    aiocb->active = 0;
    mutex_lock(&lock);
    if (idle_threads == 0 && cur_threads < max_threads)
        spawn_thread();
    TAILQ_INSERT_TAIL(&request_list, aiocb, node);
    mutex_unlock(&lock);
    cond_signal(&cond);

    return 0;
}

int qemu_paio_read(struct qemu_paiocb *aiocb)
{
    return qemu_paio_submit(aiocb, QEMU_PAIO_READ);
}

int qemu_paio_write(struct qemu_paiocb *aiocb)
{
    return qemu_paio_submit(aiocb, QEMU_PAIO_WRITE);
}

int qemu_paio_ioctl(struct qemu_paiocb *aiocb)
{
    return qemu_paio_submit(aiocb, QEMU_PAIO_IOCTL);
}

ssize_t qemu_paio_return(struct qemu_paiocb *aiocb)
{
    ssize_t ret;

    mutex_lock(&lock);
    ret = aiocb->ret;
    mutex_unlock(&lock);

    return ret;
}

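/* aio_error()-style status: 0 on success, the positive errno value on
 * failure (EINPROGRESS while the request is still pending). */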
int qemu_paio_error(struct qemu_paiocb *aiocb)
{
    ssize_t ret = qemu_paio_return(aiocb);

    if (ret < 0)
        ret = -ret;
    else
        ret = 0;

    return ret;
}

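/* A request can only be cancelled while it is still sitting on the queue;
 * once a worker has picked it up it either completes or is reported as
 * not cancellable. */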
int qemu_paio_cancel(int fd, struct qemu_paiocb *aiocb)
{
    int ret;

    mutex_lock(&lock);
    if (!aiocb->active) {
        TAILQ_REMOVE(&request_list, aiocb, node);
        aiocb->ret = -ECANCELED;
        ret = QEMU_PAIO_CANCELED;
    } else if (aiocb->ret == -EINPROGRESS)
        ret = QEMU_PAIO_NOTCANCELED;
    else
        ret = QEMU_PAIO_ALLDONE;
    mutex_unlock(&lock);

    return ret;
}