qcow2-cluster.c
/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"

int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

#ifdef DEBUG_ALLOC2
    printf("grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_mallocz(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_free(new_l1_table);
        return new_l1_table_offset;
    }
    bdrv_flush(bs->file);

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64w((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
        data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    qemu_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
    return ret;
}

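/* Invalidates all entries of the in-memory L2 table cache. */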
void qcow2_l2_cache_reset(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;

    memset(s->l2_cache, 0, s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_offsets, 0, L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_counts, 0, L2_CACHE_SIZE * sizeof(uint32_t));
}

static inline int l2_cache_new_entry(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint32_t min_count;
    int min_index, i;

    /* pick the least used entry to be replaced */
    min_index = 0;
    min_count = 0xffffffff;
    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (s->l2_cache_counts[i] < min_count) {
            min_count = s->l2_cache_counts[i];
            min_index = i;
        }
    }
    return min_index;
}

/*
 * seek_l2_table
 *
 * Looks for l2_offset in the l2_cache table.
 * If it is not found, returns NULL.
 * If it is found, increments the hit count of the entry, halves all
 * counters if one of them would overflow, and returns a pointer to the
 * cached L2 table.
 */

static uint64_t *seek_l2_table(BDRVQcowState *s, uint64_t l2_offset)
{
    int i, j;

    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (l2_offset == s->l2_cache_offsets[i]) {
            /* increment the hit count */
            if (++s->l2_cache_counts[i] == 0xffffffff) {
                for(j = 0; j < L2_CACHE_SIZE; j++) {
                    s->l2_cache_counts[j] >>= 1;
                }
            }
            return s->l2_cache + (i << s->l2_bits);
        }
    }
    return NULL;
}

/*
 * l2_load
 *
 * Loads a L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns a pointer to the L2 table on success, or NULL if the read from
 * the image file failed.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int min_index;
    int ret;

    /* check if the table for the given offset is in the cache */

    *l2_table = seek_l2_table(s, l2_offset);
    if (*l2_table != NULL) {
        return 0;
    }

    /* not found: load a new entry in the least used one */

    min_index = l2_cache_new_entry(bs);
    *l2_table = s->l2_cache + (min_index << s->l2_bits);

    BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
    ret = bdrv_pread(bs->file, l2_offset, *l2_table,
        s->l2_size * sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;

    return 0;
}

/*
 * Writes one sector of the L1 table to the disk (we can't update single
 * entries and we really don't want bdrv_pwrite to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
static int write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    int min_index;
    uint64_t old_l2_offset;
    uint64_t *l2_table;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }
    bdrv_flush(bs->file);

    /* allocate a new entry in the l2 cache */

    min_index = l2_cache_new_entry(bs);
    l2_table = s->l2_cache + (min_index << s->l2_bits);

    if (old_l2_offset == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = bdrv_pread(bs->file, old_l2_offset, l2_table,
            s->l2_size * sizeof(uint64_t));
        if (ret < 0) {
            goto fail;
        }
    }
    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);
    ret = bdrv_pwrite_sync(bs->file, l2_offset, l2_table,
        s->l2_size * sizeof(uint64_t));
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    /* update the l2 cache entry */

    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;

    *table = l2_table;
    return 0;

fail:
    s->l1_table[l1_index] = old_l2_offset;
    qcow2_l2_cache_reset(bs);
    return ret;
}

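/*
 * Counts how many of the nb_clusters L2 entries, starting at index 'start',
 * point to clusters that lie contiguously behind the cluster referenced by
 * l2_table[0] (entry i must point to that offset plus i * cluster_size; the
 * bits in 'mask' are ignored for the comparison). Returns 0 if l2_table[0]
 * is unallocated.
 */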
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t mask)
{
    int i;
    uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++)
        if (offset + (uint64_t) i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
            break;

    return (i - start);
}

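/*
 * Returns the number of unallocated (zero) L2 entries at the start of
 * l2_table, looking at nb_clusters entries at most.
 */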
static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i = 0;

    while(nb_clusters-- && l2_table[i] == 0)
        i++;

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}


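/*
 * Synchronous read helper used for copy-on-write: reads nb_sectors guest
 * sectors starting at sector_num, falling back to the backing file (or
 * zeroes) for unallocated clusters and handling compressed and encrypted
 * clusters.
 */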
static int qcow_read(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret, index_in_cluster, n, n1;
    uint64_t cluster_offset;
    struct iovec iov;
    QEMUIOVector qiov;

    while (nb_sectors > 0) {
        n = nb_sectors;

        ret = qcow2_get_cluster_offset(bs, sector_num << 9, &n,
            &cluster_offset);
        if (ret < 0) {
            return ret;
        }

        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        if (!cluster_offset) {
            if (bs->backing_hd) {
                /* read from the base image */
                iov.iov_base = buf;
                iov.iov_len = n * 512;
                qemu_iovec_init_external(&qiov, &iov, 1);

                n1 = qcow2_backing_read1(bs->backing_hd, &qiov, sector_num, n);
                if (n1 > 0) {
                    BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING);
                    ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);
                    if (ret < 0)
                        return -1;
                }
            } else {
                memset(buf, 0, 512 * n);
            }
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            if (qcow2_decompress_cluster(bs, cluster_offset) < 0)
                return -1;
            memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
        } else {
            BLKDBG_EVENT(bs->file, BLKDBG_READ);
            ret = bdrv_pread(bs->file, cluster_offset + index_in_cluster * 512, buf, n * 512);
            if (ret != n * 512)
                return -1;
            if (s->crypt_method) {
                qcow2_encrypt_sectors(s, sector_num, buf, buf, n, 0,
                                &s->aes_decrypt_key);
            }
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    return 0;
}

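/*
 * Copy-on-write helper: reads guest sectors n_start..n_end-1 of the cluster
 * starting at guest sector start_sect, re-encrypts them if necessary and
 * writes them to the same relative position in the host cluster at
 * cluster_offset.
 */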
static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,
                        uint64_t cluster_offset, int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0)
        return 0;
    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
    ret = qcow_read(bs, start_sect + n_start, s->cluster_data, n);
    if (ret < 0)
        return ret;
    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                        s->cluster_data,
                        s->cluster_data, n, 1,
                        &s->aes_encrypt_key);
    }
    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_write(bs->file, (cluster_offset >> 9) + n_start,
        s->cluster_data, n);
    if (ret < 0)
        return ret;
    return 0;
}


/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous clusters we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous clusters we can read.
 *
 * Returns 0 if the offset is found, -errno otherwise.
 *
 */

int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size)
        goto out;

    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (!l2_offset)
        goto out;

    /* load the l2 table in memory */

    l2_offset &= ~QCOW_OFLAG_COPIED;
    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    if (!*cluster_offset) {
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
    } else {
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
    }

    nb_available = (c * s->cluster_sectors);
out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    *cluster_offset &= ~QCOW_OFLAG_COPIED;
    return 0;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             uint64_t *new_l2_offset,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }
    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (l2_offset & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        l2_offset &= ~QCOW_OFLAG_COPIED;
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        if (l2_offset)
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }
        l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_offset = l2_offset;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful, 0 otherwise.
 *
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        return 0;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & QCOW_OFLAG_COPIED)
        return cluster_offset & ~QCOW_OFLAG_COPIED;

    if (cluster_offset)
        qcow2_free_any_clusters(bs, cluster_offset, 1);

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    if (bdrv_pwrite_sync(bs->file,
                    l2_offset + l2_index * sizeof(uint64_t),
                    l2_table + l2_index,
                    sizeof(uint64_t)) < 0)
        return 0;

    return cluster_offset;
}

/*
 * Write L2 table updates to disk, writing whole sectors to avoid a
 * read-modify-write in bdrv_pwrite
 */
#define L2_ENTRIES_PER_SECTOR (512 / 8)
static int write_l2_entries(BlockDriverState *bs, uint64_t *l2_table,
    uint64_t l2_offset, int l2_index, int num)
{
    int l2_start_index = l2_index & ~(L2_ENTRIES_PER_SECTOR - 1);
    int start_offset = (8 * l2_index) & ~511;
    int end_offset = (8 * (l2_index + num) + 511) & ~511;
    size_t len = end_offset - start_offset;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
    ret = bdrv_pwrite(bs->file, l2_offset + start_offset,
        &l2_table[l2_start_index], len);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

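/*
 * Links the clusters described by m (previously reserved by
 * qcow2_alloc_cluster_offset() and now filled with guest data) into the L2
 * table: copies the head and tail sectors the guest did not write (COW),
 * updates the L2 entries and frees any clusters that were replaced.
 *
 * Returns 0 on success, -errno on failure.
 */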
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, l2_offset, *l2_table;
    uint64_t cluster_offset = m->cluster_offset;

    if (m->nb_clusters == 0)
        return 0;

    old_cluster = qemu_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
    if (m->n_start) {
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        if (ret < 0)
            goto err;
    }

    if (m->nb_available & (s->cluster_sectors - 1)) {
        uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
        ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
                m->nb_available - end, s->cluster_sectors);
        if (ret < 0)
            goto err;
    }

    /* update L2 table */
    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        goto err;
    }

    for (i = 0; i < m->nb_clusters; i++) {
        /* if two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data concurrently.
         * The first one to complete updates the l2 table with a pointer to its
         * cluster; the second one has to do RMW (which is done above by
         * copy_sectors()), update the l2 table with its cluster pointer and
         * free the old cluster. This is what this loop does */
        if(l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    bdrv_flush(bs->file);

    ret = write_l2_entries(bs, l2_table, l2_offset, l2_index, m->nb_clusters);
    if (ret < 0) {
        qcow2_l2_cache_reset(bs);
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     */
    if (j != 0) {
        bdrv_flush(bs->file);
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs,
                be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1);
        }
    }

    ret = 0;
err:
    qemu_free(old_cluster);
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in qcow2 file.
 * If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0,
 * m->depends_on is set to NULL and the other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. This may be 0 if the request
 * conflicts with another write request in flight; in this case, m->depends_on
 * is set and the remaining fields of m are meaningless.
 *
 * If m->nb_clusters is non-zero, the other fields of m are valid and contain
 * information about the first allocated cluster.
 *
 * Return 0 on success and -errno in error cases
 */
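/*
 * Rough usage sketch (not verbatim from the qcow2 write path): a caller first
 * calls qcow2_alloc_cluster_offset() to reserve the clusters, then writes the
 * guest data to m->cluster_offset, and finally calls
 * qcow2_alloc_cluster_link_l2(bs, m) to make the new mapping visible in the
 * L2 table.
 */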
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table;
    int64_t cluster_offset;
    unsigned int nb_clusters, i = 0;
    QCowL2Meta *old_alloc;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret < 0) {
        return ret;
    }

    nb_clusters = size_to_clusters(s, n_end << 9);

    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* We keep all QCOW_OFLAG_COPIED clusters */

    if (cluster_offset & QCOW_OFLAG_COPIED) {
        nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, 0);

        cluster_offset &= ~QCOW_OFLAG_COPIED;
        m->nb_clusters = 0;
        m->depends_on = NULL;

        goto out;
    }

    /* for the moment, multiple compressed clusters are not managed */

    if (cluster_offset & QCOW_OFLAG_COMPRESSED)
        nb_clusters = 1;

    /* how many available clusters ? */

    while (i < nb_clusters) {
        i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
                &l2_table[l2_index], i, 0);
        if ((i >= nb_clusters) || be64_to_cpu(l2_table[l2_index + i])) {
            break;
        }

        i += count_contiguous_free_clusters(nb_clusters - i,
                &l2_table[l2_index + i]);
        if (i >= nb_clusters) {
            break;
        }

        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);

        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
                (cluster_offset & QCOW_OFLAG_COMPRESSED))
            break;
    }
    assert(i <= nb_clusters);
    nb_clusters = i;

    /*
     * Check if there already is an AIO write request in flight which allocates
     * the same cluster. In this case we need to wait until the previous
     * request has completed and updated the L2 table accordingly.
     */
    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t end_offset = offset + nb_clusters * s->cluster_size;
        uint64_t old_offset = old_alloc->offset;
        uint64_t old_end_offset = old_alloc->offset +
            old_alloc->nb_clusters * s->cluster_size;

        if (end_offset < old_offset || offset > old_end_offset) {
            /* No intersection */
        } else {
            if (offset < old_offset) {
                /* Stop at the start of a running allocation */
                nb_clusters = (old_offset - offset) >> s->cluster_bits;
            } else {
                nb_clusters = 0;
            }

            if (nb_clusters == 0) {
                /* Set dependency and wait for a callback */
                m->depends_on = old_alloc;
                m->nb_clusters = 0;
                *num = 0;
                return 0;
            }
        }
    }

    if (!nb_clusters) {
        abort();
    }

    QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);

    /* allocate a new cluster */

    cluster_offset = qcow2_alloc_clusters(bs, nb_clusters * s->cluster_size);
    if (cluster_offset < 0) {
        QLIST_REMOVE(m, next_in_flight);
        return cluster_offset;
    }

    /* save info needed for meta data update */
    m->offset = offset;
    m->n_start = n_start;
    m->nb_clusters = nb_clusters;

out:
    m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);
    m->cluster_offset = cluster_offset;

    *num = m->nb_available - n_start;

    return 0;
}

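/*
 * Inflates a raw deflate stream of buf_size bytes from buf into out_buf.
 * The data must decompress to exactly out_buf_size bytes.
 * Returns 0 on success, -1 on error.
 */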
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

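/*
 * Reads the compressed cluster described by the L2 entry cluster_offset and
 * decompresses it into s->cluster_cache, remembering its offset so that
 * repeated accesses to the same compressed cluster are served from the cache.
 * Returns 0 on success, -1 on error.
 */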
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return -1;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -1;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}