/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/block-backend.h"

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
};

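/**
 * Probe for the QED image format
 *
 * Note: the score follows the usual bdrv_probe convention: 100 for a
 * certain match (valid QED magic), 0 for no match.  The block layer
 * probes every driver and picks the one reporting the highest score.
 */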
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

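/**
 * Compute the maximum image size for a given cluster and table geometry
 *
 * Worked example, assuming the QED defaults of 64 KiB clusters and a
 * table_size of 4 clusters: table_entries = 4 * 65536 / 8 = 32768, so one
 * L2 table maps 32768 * 64 KiB = 2 GiB, and 32768 L1 entries yield a
 * maximum image size of 32768 * 2 GiB = 64 TiB.
 */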
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
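    /* x & (x - 1) clears the lowest set bit, so the result is zero exactly
     * when one bit is set.  For example 65536 & 65535 == 0 (a power of 2),
     * while 98304 & 98303 == 65536 != 0, so 96 KiB is rejected.
     */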
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
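 *
 * Note: allocation is append-only from the current end of file.  Should
 * QEMU crash after allocating but before the table updates reach disk,
 * the worst case is leaked clusters, which a consistency check can detect.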
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(QEDAIOCB *acb, int ret);

static void qed_aio_start_io(QEDAIOCB *acb)
{
    qed_aio_next_io(acb, 0);
}

static void qed_aio_next_io_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    qed_aio_next_io(acb, ret);
}

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_start_io(acb);
    }
}

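/* Overview of the need-check timer path: qed_need_check_timer_cb() fires
 * once allocating writes have been quiescent for QED_NEED_CHECK_TIMEOUT
 * seconds.  It plugs new allocating writes and flushes the file, then
 * qed_clear_need_check() clears QED_F_NEED_CHECK and rewrites the header,
 * and qed_flush_after_clear_need_check() issues a final flush and unplugs
 * the queue without waiting for that flush to complete.
 */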
static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_acquire(s);
    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs->file->bs, qed_clear_need_check, s);
    qed_release(s);
}

void qed_acquire(BDRVQEDState *s)
{
    aio_context_acquire(bdrv_get_aio_context(s->bs));
}

void qed_release(BDRVQEDState *s)
{
    aio_context_release(bdrv_get_aio_context(s->bs));
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended for
     * migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void bdrv_qed_drain(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_cb(s);
    }
}

static int bdrv_qed_do_open(BlockDriverState *bs, QDict *options, int flags,
                            Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    return bdrv_qed_do_open(bs, options, flags, errp);
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}

/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
                       &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* File must start empty and grow; check that truncate is supported */
    ret = blk_truncate(blk, 0, errp);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}

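/* Creation is normally reached via qemu-img, e.g. (illustrative invocation):
 *   qemu-img create -f qed -o cluster_size=65536,table_size=4 test.qed 10G
 * The -o keys correspond to the entries of qed_create_opts below.
 */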
static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
    BlockDriverState **file;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        *cb->file = cb->bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        aio_co_wake(cb->co);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum,
                                                 BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
        .file = file,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 * @cb:             Completion function
 * @opaque:         User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
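 *
 * Example: with a backing file of 1 MiB + 4 KiB, a 64 KiB read at
 * pos = 1 MiB first zero-fills the whole qiov, then reads only the first
 * 4 KiB from the backing file through the shortened *backing_qiov.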
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  QEMUIOVector **backing_qiov,
                                  BlockCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing, pos / BDRV_SECTOR_SIZE,
                   *backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (copy_cb->backing_qiov) {
        qemu_iovec_destroy(copy_cb->backing_qiov);
        g_free(copy_cb->backing_qiov);
        copy_cb->backing_qiov = NULL;
    }

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->backing_qiov = NULL;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov, &copy_cb->backing_qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
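 *
 * Example: linking n = 3 clusters at cluster = 0x100000 with 64 KiB
 * clusters stores {0x100000, 0x110000, 0x120000}; the zero and
 * unallocated markers are repeated unchanged rather than incremented.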
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;

    qemu_aio_unref(acb);

    /* Invoke callback */
    qed_acquire(s);
    cb(user_opaque, ret);
    qed_release(s);
}

static void qed_resume_alloc_bh(void *opaque)
{
    qed_aio_start_io(opaque);
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
                            qed_aio_complete_bh, acb);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QEDAIOCB *next_acb;
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        next_acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (next_acb) {
            aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
                                    qed_resume_alloc_bh, next_acb);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(acb, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                         offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io_cb, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

K
    if (!bdrv_aio_flush(s->bs->file->bs, qed_aio_write_l2_update_cb, opaque)) {
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
1127
    BlockCompletionFunc *next_fn;
S
Stefan Hajnoczi 已提交
1128 1129 1130 1131 1132 1133 1134 1135 1136

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io_cb;
    } else {
        if (s->bs->backing) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}

/**
 * Populate back untouched region of new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                                qed_aio_write_main, acb);
}

/**
 * Populate front untouched region of new data cluster
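 *
 * Example: for a 4 KiB write at byte 6144 of a freshly allocated 64 KiB
 * cluster, this step copies [0, 6144) from the backing file and
 * qed_aio_write_postfill() copies [10240, 65536).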
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                                qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
1197
    if (s->bs->backing) {
1198 1199 1200 1201 1202 1203
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215
static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_start_io(acb);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

1260 1261
    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
1262
        qed_write_header(s, cb, acb);
1263
    } else {
1264
        cb(acb, 0);
S
Stefan Hajnoczi 已提交
1265
    }
S
Stefan Hajnoczi 已提交
1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                qed_aio_complete(acb, -ENOMEM);
                return;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_start_io(acb);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              &acb->backing_qiov, qed_aio_next_io_cb, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io_cb, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
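 *
 * Each pass consumes cur_qiov, advances cur_pos by that many bytes, and
 * either completes the request (cur_pos >= end_pos) or looks up the next
 * contiguous cluster run with qed_find_cluster(), which re-enters the
 * read or write data callback.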
 */
static void qed_aio_next_io(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    if (acb->backing_qiov) {
        qemu_iovec_destroy(acb->backing_qiov);
        g_free(acb->backing_qiov);
        acb->backing_qiov = NULL;
    }

S
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                      acb->cur_pos, acb->end_pos - acb->cur_pos,
                      io_fn, acb);
}

static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
                                 int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockCompletionFunc *cb,
                                 void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->backing_qiov = NULL;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_start_io(acb);
    return &acb->common;
}

static BlockAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov, int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_pwrite_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        aio_co_wake(cb->co);
    }
}

static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int count,
                                                  BdrvRequestFlags flags)
{
    BlockAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Fall back if the request is not aligned; returning -ENOTSUP here makes
     * the generic block layer write explicit zeroes instead.
     */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, count)) {
        return -ENOTSUP;
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = count;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                             count >> BDRV_SECTOR_BITS,
                             qed_co_pwrite_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
1549
        error_setg_errno(errp, -ret, "Failed to update the image size");
1550 1551
    }
    return ret;
1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
1704 1705 1706 1707 1708
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_format_default_perms,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_drain               = bdrv_qed_drain,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);