/*
 * Image streaming
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block_int.h"

enum {
    /*
     * Size of data buffer for populating the image file.  This should be large
     * enough to process multiple clusters in a single call, so that populating
     * contiguous regions of the image is efficient.
     */
    STREAM_BUFFER_SIZE = 512 * 1024, /* in bytes */
};

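/*
 * Simple rate limiting: work dispatched within the current SLICE_TIME window
 * is counted against slice_quota; once the quota is exceeded, the caller is
 * told how long to sleep until the next slice begins.
 */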
#define SLICE_TIME 100000000ULL /* ns */

typedef struct {
    int64_t next_slice_time;
    uint64_t slice_quota;
    uint64_t dispatched;
} RateLimit;

static int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
{
    int64_t delay_ns = 0;
    int64_t now = qemu_get_clock_ns(rt_clock);

    if (limit->next_slice_time < now) {
        limit->next_slice_time = now + SLICE_TIME;
        limit->dispatched = 0;
    }
    if (limit->dispatched + n > limit->slice_quota) {
        delay_ns = limit->next_slice_time - now;
    } else {
        limit->dispatched += n;
    }
    return delay_ns;
}

static void ratelimit_set_speed(RateLimit *limit, uint64_t speed)
{
    limit->slice_quota = speed / (1000000000ULL / SLICE_TIME);
}

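/* State for a single image streaming block job */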
typedef struct StreamBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *base;
} StreamBlockJob;

/* Populate the given sectors by reading them with copy-on-read, so that
 * unallocated clusters are copied from the backing file into bs.
 */
static int coroutine_fn stream_populate(BlockDriverState *bs,
                                        int64_t sector_num, int nb_sectors,
                                        void *buf)
{
    struct iovec iov = {
        .iov_base = buf,
        .iov_len  = nb_sectors * BDRV_SECTOR_SIZE,
    };
    QEMUIOVector qiov;

    qemu_iovec_init_external(&qiov, &iov, 1);

    /* Copy-on-read the unallocated clusters */
    return bdrv_co_copy_on_readv(bs, sector_num, nb_sectors, &qiov);
}

/* Coroutine that streams the backing file contents into the active image */
static void coroutine_fn stream_run(void *opaque)
{
    StreamBlockJob *s = opaque;
    BlockDriverState *bs = s->common.bs;
    BlockDriverState *base = s->base;
    int64_t sector_num, end;
    int ret = 0;
    int n;
    void *buf;

    s->common.len = bdrv_getlength(bs);
    if (s->common.len < 0) {
        block_job_complete(&s->common, s->common.len);
        return;
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    buf = qemu_blockalign(bs, STREAM_BUFFER_SIZE);

    /* Turn on copy-on-read for the whole block device so that guest read
     * requests help us make progress.  Only do this when copying the entire
     * backing chain since the copy-on-read operation does not take base into
     * account.
     */
    if (!base) {
        bdrv_enable_copy_on_read(bs);
    }

    for (sector_num = 0; sector_num < end; sector_num += n) {
retry:
        if (block_job_is_cancelled(&s->common)) {
            break;
        }

        ret = bdrv_co_is_allocated(bs, sector_num,
                                   STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE, &n);
        trace_stream_one_iteration(s, sector_num, n, ret);
        if (ret == 0) {
            if (s->common.speed) {
                uint64_t delay_ns = ratelimit_calculate_delay(&s->limit, n);
                if (delay_ns > 0) {
                    co_sleep_ns(rt_clock, delay_ns);

                    /* Recheck cancellation and that sectors are unallocated */
                    goto retry;
                }
            }
            ret = stream_populate(bs, sector_num, n, buf);
        }
        if (ret < 0) {
            break;
        }

        /* Publish progress */
        s->common.offset += n * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * with no pending I/O here so that qemu_aio_flush() returns.
         */
        co_sleep_ns(rt_clock, 0);
    }

    if (!base) {
        bdrv_disable_copy_on_read(bs);
    }

    if (sector_num == end && ret == 0) {
        /* Streaming completed successfully; drop the backing file link */
        ret = bdrv_change_backing_file(bs, NULL, NULL);
    }

    qemu_vfree(buf);
    block_job_complete(&s->common, ret);
}

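/*
 * Update the job's speed limit.  The value is divided by BDRV_SECTOR_SIZE
 * for the sector-based rate limiter; a value of 0 leaves the job
 * unthrottled (see the s->common.speed check in stream_run()).
 */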
static int stream_set_speed(BlockJob *job, int64_t value)
{
    StreamBlockJob *s = container_of(job, StreamBlockJob, common);

    if (value < 0) {
        return -EINVAL;
    }
    job->speed = value;
    ratelimit_set_speed(&s->limit, value / BDRV_SECTOR_SIZE);
    return 0;
}

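/* Block job type definition for image streaming */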
static BlockJobType stream_job_type = {
    .instance_size = sizeof(StreamBlockJob),
    .job_type      = "stream",
    .set_speed     = stream_set_speed,
};

/* Create the streaming job on bs and start its coroutine; completion is
 * reported through cb/opaque.
 */
int stream_start(BlockDriverState *bs, BlockDriverState *base,
                 BlockDriverCompletionFunc *cb, void *opaque)
{
    StreamBlockJob *s;
    Coroutine *co;

    s = block_job_create(&stream_job_type, bs, cb, opaque);
    if (!s) {
        return -EBUSY; /* bs must already be in use */
    }

    s->base = base;

    co = qemu_coroutine_create(stream_run);
    trace_stream_start(bs, base, s, co, opaque);
    qemu_coroutine_enter(co, s);
    return 0;
}