/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list, overlaid on the chunk data itself:
 * the link lives in the first bytes of each free chunk (see
 * mirror_free_init() and mirror_iteration_done()).
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *target;
F
Fam Zheng 已提交
34
    BlockDriverState *base;
35 36 37 38 39 40
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
F
Fam Zheng 已提交
41
    bool is_none_mode;
42
    BlockdevOnError on_source_error, on_target_error;
P
Paolo Bonzini 已提交
43 44
    bool synced;
    bool should_complete;
P
Paolo Bonzini 已提交
45
    int64_t sector_num;
46
    int64_t granularity;
47
    size_t buf_size;
M
Max Reitz 已提交
48
    int64_t bdev_length;
49
    unsigned long *cow_bitmap;
F
Fam Zheng 已提交
50
    BdrvDirtyBitmap *dirty_bitmap;
51
    HBitmapIter hbi;
P
Paolo Bonzini 已提交
52
    uint8_t *buf;
53 54
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;
55

56
    unsigned long *in_flight_bitmap;
57
    int in_flight;
M
Max Reitz 已提交
58
    int sectors_in_flight;
59
    int ret;
P
Paolo Bonzini 已提交
60 61
} MirrorBlockJob;

/* One in-flight copy operation: a read of nb_sectors from the source at
 * sector_num (mirror_read_complete) chained into a write of the same range
 * to the target (mirror_write_complete).  The qiov's buffers are taken from
 * MirrorBlockJob.buf_free and returned in mirror_iteration_done().
 */
typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->common.bs,
                                      s->on_source_error, true, error);
    } else {
        return block_job_error_action(&s->common, s->target,
                                      s->on_target_error, false, error);
    }
}

/* Tear down a finished MirrorOp: return its buffers to the free list, clear
 * its chunks in the in-flight bitmap, account progress on success, and wake
 * the job coroutine if it is waiting for an operation to complete.
 */
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;

    /* Give the granularity-sized chunks backing the qiov back to buf_free. */
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = op->nb_sectors / sectors_per_chunk;
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            /* These clusters now exist on the target; no COW needed later. */
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
    }

    qemu_iovec_destroy(&op->qiov);
    g_slice_free(MirrorOp, op);

    /* Enter coroutine when it is not sleeping.  The coroutine sleeps to
     * rate-limit itself.  The coroutine will eventually resume since there is
     * a sleep timeout so don't wake it early.
     */
    if (s->common.busy) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}

static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockDriverState *source = s->common.bs;
        BlockErrorAction action;

        bdrv_set_dirty(source, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
W
Wenchao Xia 已提交
133
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockDriverState *source = s->common.bs;
        BlockErrorAction action;

        bdrv_set_dirty(source, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
W
Wenchao Xia 已提交
150
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
151 152 153 154 155 156 157 158 159 160
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
                    mirror_write_complete, op);
}

/* Start one asynchronous copy operation covering a run of adjacent dirty
 * chunks starting at the next dirty sector.  Returns the delay in ns that
 * the caller should sleep to honour the rate limit (0 for no delay).
 */
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    uint64_t delay_ns = 0;
    MirrorOp *op;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        /* Iterator exhausted: restart from the beginning of the bitmap. */
        bdrv_dirty_iter_init(source, s->dirty_bitmap, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s,
                                  bdrv_get_dirty_count(source, s->dirty_bitmap));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->bdev_length / BDRV_SECTOR_SIZE;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done.  */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        qemu_coroutine_yield();
    }

    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            qemu_coroutine_yield();
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }

        /* We have enough free space to copy these sectors.  */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
        if (!s->synced && s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
        }
    } while (delay_ns == 0 && next_sector < end);

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_slice_new(MirrorOp);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        /* The last chunk may be shorter than the granularity. */
        size_t remaining = (nb_sectors * BDRV_SECTOR_SIZE) - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector
            && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty(source, sector_num, nb_sectors);

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);
    bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                   mirror_read_complete, op);
    return delay_ns;
}
/* Carve the pre-allocated s->buf into granularity-sized chunks and put
 * every chunk on the buf_free list.  The list links are overlaid on the
 * chunk data itself (see MirrorBuffer).
 */
static void mirror_free_init(MirrorBlockJob *s)
{
    uint8_t *chunk = s->buf;
    size_t remaining = s->buf_size;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    for (; remaining != 0; remaining -= s->granularity) {
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, (MirrorBuffer *)chunk, next);
        s->buf_free_count++;
        chunk += s->granularity;
    }
}

/* Yield in the job coroutine until every in-flight operation has completed.
 * Each completing op re-enters the coroutine from mirror_iteration_done().
 */
static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        qemu_coroutine_yield();
    }
}

/* Main coroutine of the mirror/active-commit job: populate the dirty bitmap
 * (unless sync=none), then repeatedly copy dirty chunks to the target until
 * the job is cancelled or completed, and finally tear everything down and
 * optionally swap the target into the graph.
 */
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, sectors_per_chunk, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[1024];
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    } else if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        ret = bdrv_get_info(s->target, &bdi);
        if (ret < 0) {
            goto immediate_exit;
        }
        if (s->granularity < bdi.cluster_size) {
            s->buf_size = MAX(s->buf_size, bdi.cluster_size);
            s->cow_bitmap = bitmap_new(length);
        }
    }

    end = s->bdev_length / BDRV_SECTOR_SIZE;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    mirror_free_init(s);

    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base = s->base;
        for (sector_num = 0; sector_num < end; ) {
            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
            ret = bdrv_is_allocated_above(bs, base,
                                          sector_num, next - sector_num, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty(bs, sector_num, n);
                sector_num = next;
            } else {
                sector_num += n;
            }
        }
    }

    bdrv_dirty_iter_init(bs, s->dirty_bitmap, &s->hbi);
    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that qemu_aio_flush() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                qemu_coroutine_yield();
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
                if (delay_ns == 0) {
                    continue;
                }
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain_all();
            cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
    bdrv_iostatus_disable(s->target);
    if (s->should_complete && ret == 0) {
        BlockDriverState *to_replace = s->common.bs;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }
        if (bdrv_get_flags(s->target) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(s->target, bdrv_get_flags(to_replace), NULL);
        }
        bdrv_swap(s->target, to_replace);
        if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
            /* drop the bs loop chain formed by the swap: break the loop then
             * trigger the unref from the top one */
            BlockDriverState *p = s->base->backing_hd;
            bdrv_set_backing_hd(s->base, NULL);
            bdrv_unref(p);
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    g_free(s->replaces);
    bdrv_unref(s->target);
    block_job_completed(&s->common, ret);
}

/* BlockJobDriver .set_speed callback: validate the requested speed (in
 * bytes/s) and reprogram the rate limiter in sectors per SLICE_TIME.
 */
static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_set(errp, QERR_INVALID_PARAMETER, "speed");
    } else {
        ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
    }
}

/* BlockJobDriver .iostatus_reset callback: clear the target's iostatus. */
static void mirror_iostatus_reset(BlockJob *job)
{
    MirrorBlockJob *mirror_job = container_of(job, MirrorBlockJob, common);

    bdrv_iostatus_reset(mirror_job->target);
}

/* BlockJobDriver .complete callback (block-job-complete): make sure the
 * target has a backing file, optionally pin the node to replace, then ask
 * mirror_run to finish by setting should_complete and resuming the job.
 */
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    Error *local_err = NULL;
    int ret;

    ret = bdrv_open_backing_file(s->target, NULL, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return;
    }
    if (!s->synced) {
        /* Completion is only allowed after the job reported READY. */
        error_set(errp, QERR_BLOCK_JOB_NOT_READY,
                  bdrv_get_device_name(job->bs));
        return;
    }

    /* check the target bs is not blocked and block all operations on it */
    if (s->replaces) {
        s->to_replace = check_to_replace_node(s->replaces, &local_err);
        if (!s->to_replace) {
            error_propagate(errp, local_err);
            return;
        }

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);
    }

    s->should_complete = true;
    block_job_resume(job);
}

static const BlockJobDriver mirror_job_driver = {
P
Paolo Bonzini 已提交
604
    .instance_size = sizeof(MirrorBlockJob),
F
Fam Zheng 已提交
605
    .job_type      = BLOCK_JOB_TYPE_MIRROR,
P
Paolo Bonzini 已提交
606
    .set_speed     = mirror_set_speed,
607
    .iostatus_reset= mirror_iostatus_reset,
P
Paolo Bonzini 已提交
608
    .complete      = mirror_complete,
P
Paolo Bonzini 已提交
609 610
};

/* Job driver for active block-commit; shares all callbacks with mirror. */
static const BlockJobDriver commit_active_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_COMMIT,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
621 622 623 624 625
                             const char *replaces,
                             int64_t speed, int64_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
626
                             BlockCompletionFunc *cb,
627 628 629
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
P
Paolo Bonzini 已提交
630 631 632
{
    MirrorBlockJob *s;

633 634 635 636 637 638 639 640 641 642 643 644 645 646
    if (granularity == 0) {
        /* Choose the default granularity based on the target file's cluster
         * size, clamped between 4k and 64k.  */
        BlockDriverInfo bdi;
        if (bdrv_get_info(target, &bdi) >= 0 && bdi.cluster_size != 0) {
            granularity = MAX(4096, bdi.cluster_size);
            granularity = MIN(65536, granularity);
        } else {
            granularity = 65536;
        }
    }

    assert ((granularity & (granularity - 1)) == 0);

647 648 649 650 651 652 653
    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_set(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

F
Fam Zheng 已提交
654

F
Fam Zheng 已提交
655
    s = block_job_create(driver, bs, speed, cb, opaque, errp);
P
Paolo Bonzini 已提交
656 657 658 659
    if (!s) {
        return;
    }

660
    s->replaces = g_strdup(replaces);
661 662
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
P
Paolo Bonzini 已提交
663
    s->target = target;
F
Fam Zheng 已提交
664
    s->is_none_mode = is_none_mode;
F
Fam Zheng 已提交
665
    s->base = base;
666
    s->granularity = granularity;
667
    s->buf_size = MAX(buf_size, granularity);
668

669 670 671 672
    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, errp);
    if (!s->dirty_bitmap) {
        return;
    }
P
Paolo Bonzini 已提交
673
    bdrv_set_enable_write_cache(s->target, true);
674 675
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
P
Paolo Bonzini 已提交
676 677 678 679
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}

void mirror_start(BlockDriverState *bs, BlockDriverState *target,
682
                  const char *replaces,
F
Fam Zheng 已提交
683 684 685
                  int64_t speed, int64_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
686
                  BlockCompletionFunc *cb,
F
Fam Zheng 已提交
687 688 689 690 691 692 693
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
694 695
    mirror_start_job(bs, target, replaces,
                     speed, granularity, buf_size,
F
Fam Zheng 已提交
696 697 698 699 700 701 702
                     on_source_error, on_target_error, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}

void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
703
                         BlockCompletionFunc *cb,
F
Fam Zheng 已提交
704 705
                         void *opaque, Error **errp)
{
706 707
    int64_t length, base_length;
    int orig_base_flags;
708
    int ret;
709
    Error *local_err = NULL;
710 711 712

    orig_base_flags = bdrv_get_flags(base);

F
Fam Zheng 已提交
713 714 715
    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }
716 717 718

    length = bdrv_getlength(bs);
    if (length < 0) {
719 720
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
721 722 723 724 725
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
726 727
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
728 729 730 731
        goto error_restore_flags;
    }

    if (length > base_length) {
732 733 734 735
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                            "Top image %s is larger than base image %s, and "
736 737 738 739 740 741
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

F
Fam Zheng 已提交
742
    bdrv_ref(base);
743
    mirror_start_job(bs, base, NULL, speed, 0, 0,
744
                     on_error, on_error, cb, opaque, &local_err,
F
Fam Zheng 已提交
745
                     &commit_active_job_driver, false, base);
746
    if (local_err) {
747
        error_propagate(errp, local_err);
748 749 750 751 752 753 754 755 756 757
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
F
Fam Zheng 已提交
758
}