Commit b4b3ab14 authored by Fam Zheng, committed by Kevin Wolf

VMDK: separate vmdk_open by format version

Separate vmdk_open by subformats to:
* vmdk_open_vmdk3
* vmdk_open_vmdk4
Signed-off-by: Fam Zheng <famcool@gmail.com>
Reviewed-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Parent 01fc99d6
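After this change, vmdk_open only probes the image's magic number and dispatches to the subformat-specific opener shown in the diff below. The standalone sketch here illustrates just that probe step; it is not QEMU code, but it uses the same VMDK magic values that block/vmdk.c defines ('COWD' for the VMDK3/ESX variant, 'KDMV' for VMDK4), read big-endian from the first four bytes of the file.

/* Standalone illustration of the magic-number dispatch this patch introduces.
 * Not QEMU code: it only probes a file's first four bytes; the real
 * vmdk_open_vmdk3()/vmdk_open_vmdk4() then parse the matching header. */
#include <stdint.h>
#include <stdio.h>

#define VMDK3_MAGIC (('C' << 24) | ('O' << 16) | ('W' << 8) | 'D')
#define VMDK4_MAGIC (('K' << 24) | ('D' << 16) | ('M' << 8) | 'V')

int main(int argc, char **argv)
{
    uint8_t buf[4];
    uint32_t magic;
    FILE *f;

    if (argc != 2) {
        fprintf(stderr, "usage: %s image.vmdk\n", argv[0]);
        return 1;
    }
    f = fopen(argv[1], "rb");
    if (!f) {
        perror("fopen");
        return 1;
    }
    if (fread(buf, 1, sizeof(buf), f) != sizeof(buf)) {
        fprintf(stderr, "short read\n");
        fclose(f);
        return 1;
    }
    fclose(f);

    /* The magic is stored big-endian, matching be32_to_cpu() in vmdk_open(). */
    magic = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
            ((uint32_t)buf[2] << 8)  | (uint32_t)buf[3];
    if (magic == VMDK3_MAGIC) {
        printf("VMDK3 (COWD) image: vmdk_open would call vmdk_open_vmdk3()\n");
    } else if (magic == VMDK4_MAGIC) {
        printf("VMDK4 (KDMV) image: vmdk_open would call vmdk_open_vmdk4()\n");
    } else {
        printf("not a recognized VMDK sparse image\n");
    }
    return 0;
}

Splitting on the magic value up front keeps each opener free of the other format's header layout, which is also what lets the new error paths return distinct negative errno values instead of the old blanket return -1.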
@@ -458,33 +458,89 @@ static VmdkExtent *vmdk_add_extent(BlockDriverState *bs,
     return extent;
 }
 
-static int vmdk_open(BlockDriverState *bs, int flags)
+static int vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent)
 {
-    BDRVVmdkState *s = bs->opaque;
-    uint32_t magic;
-    int i;
-    uint32_t l1_size, l1_entry_sectors;
-    VmdkExtent *extent = NULL;
+    int ret;
+    int l1_size, i;
 
-    if (bdrv_pread(bs->file, 0, &magic, sizeof(magic)) != sizeof(magic))
-        goto fail;
+    /* read the L1 table */
+    l1_size = extent->l1_size * sizeof(uint32_t);
+    extent->l1_table = qemu_malloc(l1_size);
+    ret = bdrv_pread(extent->file,
+                    extent->l1_table_offset,
+                    extent->l1_table,
+                    l1_size);
+    if (ret < 0) {
+        goto fail_l1;
+    }
+    for (i = 0; i < extent->l1_size; i++) {
+        le32_to_cpus(&extent->l1_table[i]);
+    }
 
-    magic = be32_to_cpu(magic);
-    if (magic == VMDK3_MAGIC) {
-        VMDK3Header header;
-        if (bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header))
-            != sizeof(header)) {
-            goto fail;
-        }
-        extent = vmdk_add_extent(bs, bs->file, false,
-                              le32_to_cpu(header.disk_sectors),
-                              le32_to_cpu(header.l1dir_offset) << 9, 0,
-                              1 << 6, 1 << 9, le32_to_cpu(header.granularity));
-    } else if (magic == VMDK4_MAGIC) {
-        VMDK4Header header;
-        if (bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header))
-            != sizeof(header)) {
-            goto fail;
-        }
+    if (extent->l1_backup_table_offset) {
+        extent->l1_backup_table = qemu_malloc(l1_size);
+        ret = bdrv_pread(extent->file,
+                        extent->l1_backup_table_offset,
+                        extent->l1_backup_table,
+                        l1_size);
+        if (ret < 0) {
+            goto fail_l1b;
+        }
+        for (i = 0; i < extent->l1_size; i++) {
+            le32_to_cpus(&extent->l1_backup_table[i]);
+        }
+    }
+
+    extent->l2_cache =
+        qemu_malloc(extent->l2_size * L2_CACHE_SIZE * sizeof(uint32_t));
+    return 0;
+fail_l1b:
+    qemu_free(extent->l1_backup_table);
+fail_l1:
+    qemu_free(extent->l1_table);
+    return ret;
+}
+
+static int vmdk_open_vmdk3(BlockDriverState *bs, int flags)
+{
+    int ret;
+    uint32_t magic;
+    VMDK3Header header;
+    VmdkExtent *extent;
+
+    ret = bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header));
+    if (ret < 0) {
+        goto fail;
+    }
+    extent = vmdk_add_extent(bs,
+                             bs->file, false,
+                             le32_to_cpu(header.disk_sectors),
+                             le32_to_cpu(header.l1dir_offset) << 9,
+                             0, 1 << 6, 1 << 9,
+                             le32_to_cpu(header.granularity));
+    ret = vmdk_init_tables(bs, extent);
+    if (ret) {
+        /* vmdk_init_tables cleans up on fail, so only free allocation of
+         * vmdk_add_extent here. */
+        goto fail;
+    }
+    return 0;
+fail:
+    vmdk_free_extents(bs);
+    return ret;
+}
+
+static int vmdk_open_vmdk4(BlockDriverState *bs, int flags)
+{
+    int ret;
+    uint32_t magic;
+    uint32_t l1_size, l1_entry_sectors;
+    VMDK4Header header;
+    BDRVVmdkState *s = bs->opaque;
+    VmdkExtent *extent;
+
+    ret = bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header));
+    if (ret < 0) {
+        goto fail;
+    }
     l1_entry_sectors = le32_to_cpu(header.num_gtes_per_gte)
@@ -499,51 +555,41 @@ static int vmdk_open(BlockDriverState *bs, int flags)
                              le32_to_cpu(header.num_gtes_per_gte),
                              le64_to_cpu(header.granularity));
     if (extent->l1_entry_sectors <= 0) {
+        ret = -EINVAL;
         goto fail;
     }
-        // try to open parent images, if exist
-        if (vmdk_parent_open(bs) != 0)
-            goto fail;
-        // write the CID once after the image creation
-        s->parent_cid = vmdk_read_cid(bs,1);
-    } else {
-        goto fail;
-    }
-
-    /* read the L1 table */
-    l1_size = extent->l1_size * sizeof(uint32_t);
-    extent->l1_table = qemu_malloc(l1_size);
-    if (bdrv_pread(bs->file,
-            extent->l1_table_offset,
-            extent->l1_table,
-            l1_size)
-        != l1_size) {
+    /* try to open parent images, if exist */
+    ret = vmdk_parent_open(bs);
+    if (ret) {
         goto fail;
     }
-    for (i = 0; i < extent->l1_size; i++) {
-        le32_to_cpus(&extent->l1_table[i]);
-    }
-
-    if (extent->l1_backup_table_offset) {
-        extent->l1_backup_table = qemu_malloc(l1_size);
-        if (bdrv_pread(bs->file,
-                extent->l1_backup_table_offset,
-                extent->l1_backup_table,
-                l1_size)
-            != l1_size) {
-            goto fail;
-        }
-        for (i = 0; i < extent->l1_size; i++) {
-            le32_to_cpus(&extent->l1_backup_table[i]);
-        }
-    }
-
-    extent->l2_cache =
-        qemu_malloc(extent->l2_size * L2_CACHE_SIZE * sizeof(uint32_t));
+    s->parent_cid = vmdk_read_cid(bs, 1);
+    ret = vmdk_init_tables(bs, extent);
+    if (ret) {
+        goto fail;
+    }
     return 0;
 fail:
     vmdk_free_extents(bs);
-    return -1;
+    return ret;
+}
+
+static int vmdk_open(BlockDriverState *bs, int flags)
+{
+    uint32_t magic;
+
+    if (bdrv_pread(bs->file, 0, &magic, sizeof(magic)) != sizeof(magic)) {
+        return -EIO;
+    }
+
+    magic = be32_to_cpu(magic);
+    if (magic == VMDK3_MAGIC) {
+        return vmdk_open_vmdk3(bs, flags);
+    } else if (magic == VMDK4_MAGIC) {
+        return vmdk_open_vmdk4(bs, flags);
+    } else {
+        return -EINVAL;
+    }
 }
 
 static int get_whole_cluster(BlockDriverState *bs,
@@ -630,11 +676,11 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
     if (!l2_offset) {
         return 0;
     }
-    for(i = 0; i < L2_CACHE_SIZE; i++) {
+    for (i = 0; i < L2_CACHE_SIZE; i++) {
         if (l2_offset == extent->l2_cache_offsets[i]) {
             /* increment the hit count */
             if (++extent->l2_cache_counts[i] == 0xffffffff) {
-                for(j = 0; j < L2_CACHE_SIZE; j++) {
+                for (j = 0; j < L2_CACHE_SIZE; j++) {
                     extent->l2_cache_counts[j] >>= 1;
                 }
             }
@@ -645,7 +691,7 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
     /* not found: load a new entry in the least used one */
     min_index = 0;
     min_count = 0xffffffff;
-    for(i = 0; i < L2_CACHE_SIZE; i++) {
+    for (i = 0; i < L2_CACHE_SIZE; i++) {
         if (extent->l2_cache_counts[i] < min_count) {
            min_count = extent->l2_cache_counts[i];
            min_index = i;
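One detail worth noting in vmdk_init_tables above is its stacked cleanup labels: a failed read jumps to the label that frees exactly what has been allocated so far, in reverse order, and the negative return value from bdrv_pread is propagated instead of the old blanket return -1. A minimal self-contained sketch of that idiom follows; all names here are hypothetical, not QEMU APIs.

/* Minimal sketch of the stacked-cleanup-label idiom used by vmdk_init_tables:
 * on failure, jump to the label that frees everything allocated so far,
 * in reverse order, and propagate the error code. Hypothetical names. */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct tables {
    uint32_t *l1;
    uint32_t *l1_backup;
};

/* Stand-in for bdrv_pread(); pretend the read always succeeds. */
static int read_table(uint32_t *dst, size_t n)
{
    memset(dst, 0, n * sizeof(*dst));
    return 0;
}

static int tables_init(struct tables *t, size_t n)
{
    int ret;

    t->l1 = malloc(n * sizeof(uint32_t));
    if (!t->l1) {
        return -ENOMEM;
    }
    ret = read_table(t->l1, n);
    if (ret < 0) {
        goto fail_l1;                 /* only the L1 table exists so far */
    }

    t->l1_backup = malloc(n * sizeof(uint32_t));
    if (!t->l1_backup) {
        ret = -ENOMEM;
        goto fail_l1;
    }
    ret = read_table(t->l1_backup, n);
    if (ret < 0) {
        goto fail_l1b;                /* both tables exist, free both */
    }
    return 0;

fail_l1b:
    free(t->l1_backup);
fail_l1:
    free(t->l1);
    return ret;
}

int main(void)
{
    struct tables t;

    if (tables_init(&t, 16) < 0) {
        return 1;
    }
    free(t.l1_backup);
    free(t.l1);
    return 0;
}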