Commit 148da7ea authored by Kevin Wolf, committed by Anthony Liguori

qcow2: Return 0/-errno in qcow2_alloc_cluster_offset

Returning 0/-errno allows callers to distinguish different error classes. The
cluster offset of newly allocated clusters is now returned in the QCowL2Meta
struct.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Parent 1e3e8f1a
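The hunks below change two calling conventions at once: qcow2_alloc_cluster_offset() now reports failure as a negative errno instead of a zero offset, and the cluster offset itself is read back from the QCowL2Meta struct. The sketch below shows how a caller is expected to drive the new interface; it mirrors the updated preallocate() and qcow_aio_write_cb() code in this commit, but the helper function (write_one_cluster) and its surrounding error handling are illustrative only, not part of the patch.

/*
 * Illustrative sketch only -- not part of this commit. Assumes the QEMU
 * block-layer types and the qcow2 internals visible in the diff below.
 */
static int write_one_cluster(BlockDriverState *bs, uint64_t offset,
                             int n_start, int n_end, int *num)
{
    QCowL2Meta l2meta;
    int ret;

    QLIST_INIT(&l2meta.dependent_requests);

    /* Failure is now signalled by a negative errno, not by a 0 offset. */
    ret = qcow2_alloc_cluster_offset(bs, offset, n_start, n_end, num, &l2meta);
    if (ret < 0) {
        return ret;
    }

    /*
     * An asynchronous caller must also check l2meta.depends_on here: if it
     * is non-NULL, the request overlaps an allocation in flight and has to
     * be queued on depends_on->dependent_requests (see qcow_aio_write_cb).
     */

    /* The cluster offset is now taken from the QCowL2Meta struct; guest
     * data would be written at l2meta.cluster_offset here ... */

    /* ... and qcow2_alloc_cluster_link_l2() reads it from there as well. */
    ret = qcow2_alloc_cluster_link_l2(bs, &l2meta);
    if (ret < 0) {
        qcow2_free_any_clusters(bs, l2meta.cluster_offset, l2meta.nb_clusters);
        return ret;
    }

    return 0;
}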
@@ -611,12 +611,12 @@ static int write_l2_entries(BDRVQcowState *s, uint64_t *l2_table,
     return 0;
 }
 
-int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, uint64_t cluster_offset,
-    QCowL2Meta *m)
+int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
 {
     BDRVQcowState *s = bs->opaque;
     int i, j = 0, l2_index, ret;
     uint64_t *old_cluster, start_sect, l2_offset, *l2_table;
+    uint64_t cluster_offset = m->cluster_offset;
 
     if (m->nb_clusters == 0)
         return 0;
@@ -677,16 +677,22 @@ err:
 /*
  * alloc_cluster_offset
  *
- * For a given offset of the disk image, return cluster offset in
- * qcow2 file.
- *
+ * For a given offset of the disk image, return cluster offset in qcow2 file.
  * If the offset is not found, allocate a new cluster.
  *
- * Return the cluster offset if successful,
- * Return 0, otherwise.
+ * If the cluster was already allocated, m->nb_clusters is set to 0,
+ * m->depends_on is set to NULL and the other fields in m are meaningless.
+ *
+ * If the cluster is newly allocated, m->nb_clusters is set to the number of
+ * contiguous clusters that have been allocated. This may be 0 if the request
+ * conflict with another write request in flight; in this case, m->depends_on
+ * is set and the remaining fields of m are meaningless.
+ *
+ * If m->nb_clusters is non-zero, the other fields of m are valid and contain
+ * information about the first allocated cluster.
+ *
+ * Return 0 on success and -errno in error cases
  */
 uint64_t qcow2_alloc_cluster_offset(BlockDriverState *bs,
                                     uint64_t offset,
                                     int n_start, int n_end,
@@ -700,7 +706,7 @@ uint64_t qcow2_alloc_cluster_offset(BlockDriverState *bs,
 
     ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
     if (ret < 0) {
-        return 0;
+        return ret;
     }
 
     nb_clusters = size_to_clusters(s, n_end << 9);
@@ -717,6 +723,7 @@ uint64_t qcow2_alloc_cluster_offset(BlockDriverState *bs,
         cluster_offset &= ~QCOW_OFLAG_COPIED;
         m->nb_clusters = 0;
+        m->depends_on = NULL;
 
         goto out;
     }
@@ -795,10 +802,11 @@ uint64_t qcow2_alloc_cluster_offset(BlockDriverState *bs,
 out:
     m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);
+    m->cluster_offset = cluster_offset;
 
     *num = m->nb_available - n_start;
 
-    return cluster_offset;
+    return 0;
 }
 
 static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
@@ -561,7 +561,7 @@ static void qcow_aio_write_cb(void *opaque, int ret)
     acb->hd_aiocb = NULL;
 
     if (ret >= 0) {
-        ret = qcow2_alloc_cluster_link_l2(bs, acb->cluster_offset, &acb->l2meta);
+        ret = qcow2_alloc_cluster_link_l2(bs, &acb->l2meta);
     }
 
     run_dependent_requests(&acb->l2meta);
@@ -585,21 +585,23 @@ static void qcow_aio_write_cb(void *opaque, int ret)
         n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
         n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
 
-    acb->cluster_offset = qcow2_alloc_cluster_offset(bs, acb->sector_num << 9,
-                                          index_in_cluster,
-                                          n_end, &acb->n, &acb->l2meta);
+    ret = qcow2_alloc_cluster_offset(bs, acb->sector_num << 9,
+        index_in_cluster, n_end, &acb->n, &acb->l2meta);
+    if (ret < 0) {
+        goto done;
+    }
+
+    acb->cluster_offset = acb->l2meta.cluster_offset;
 
     /* Need to wait for another request? If so, we are done for now. */
-    if (!acb->cluster_offset && acb->l2meta.depends_on != NULL) {
+    if (acb->l2meta.nb_clusters == 0 && acb->l2meta.depends_on != NULL) {
         QLIST_INSERT_HEAD(&acb->l2meta.depends_on->dependent_requests,
             acb, next_depend);
         return;
     }
 
-    if (!acb->cluster_offset || (acb->cluster_offset & 511) != 0) {
-        ret = -EIO;
-        goto done;
-    }
+    assert((acb->cluster_offset & 511) == 0);
 
     if (s->crypt_method) {
         if (!acb->cluster_data) {
             acb->cluster_data = qemu_mallocz(QCOW_MAX_CRYPT_CLUSTERS *
@@ -782,27 +784,27 @@ static int get_bits_from_size(size_t size)
 static int preallocate(BlockDriverState *bs)
 {
     BDRVQcowState *s = bs->opaque;
-    uint64_t cluster_offset = 0;
     uint64_t nb_sectors;
     uint64_t offset;
     int num;
+    int ret;
     QCowL2Meta meta;
 
     nb_sectors = bdrv_getlength(bs) >> 9;
     offset = 0;
     QLIST_INIT(&meta.dependent_requests);
+    meta.cluster_offset = 0;
 
     while (nb_sectors) {
         num = MIN(nb_sectors, INT_MAX >> 9);
-        cluster_offset = qcow2_alloc_cluster_offset(bs, offset, 0, num, &num,
-            &meta);
+        ret = qcow2_alloc_cluster_offset(bs, offset, 0, num, &num, &meta);
 
-        if (cluster_offset == 0) {
+        if (ret < 0) {
             return -1;
         }
 
-        if (qcow2_alloc_cluster_link_l2(bs, cluster_offset, &meta) < 0) {
-            qcow2_free_any_clusters(bs, cluster_offset, meta.nb_clusters);
+        if (qcow2_alloc_cluster_link_l2(bs, &meta) < 0) {
+            qcow2_free_any_clusters(bs, meta.cluster_offset, meta.nb_clusters);
             return -1;
         }
@@ -821,10 +823,10 @@ static int preallocate(BlockDriverState *bs)
      * all of the allocated clusters (otherwise we get failing reads after
      * EOF). Extend the image to the last allocated sector.
      */
-    if (cluster_offset != 0) {
+    if (meta.cluster_offset != 0) {
         uint8_t buf[512];
         memset(buf, 0, 512);
-        bdrv_write(s->hd, (cluster_offset >> 9) + num - 1, buf, 1);
+        bdrv_write(s->hd, (meta.cluster_offset >> 9) + num - 1, buf, 1);
     }
 
     return 0;
@@ -135,6 +135,7 @@ struct QCowAIOCB;
 typedef struct QCowL2Meta
 {
     uint64_t offset;
+    uint64_t cluster_offset;
     int n_start;
     int nb_available;
     int nb_clusters;
@@ -199,8 +200,7 @@ uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                                uint64_t offset,
                                                int compressed_size);
-int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, uint64_t cluster_offset,
-    QCowL2Meta *m);
+int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m);
 
 /* qcow2-snapshot.c functions */
 int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info);
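For reference, the new comment on qcow2_alloc_cluster_offset() above defines three possible outcomes, which callers now tell apart through the QCowL2Meta fields rather than through the return value. A compact restatement as code follows; the enum and helper are hypothetical, written only to summarize the contract, not code from this commit.

/* Hypothetical summary of the QCowL2Meta contract -- not part of this commit. */
typedef enum {
    ALLOC_ALREADY_ALLOCATED,  /* nb_clusters == 0, depends_on == NULL */
    ALLOC_MUST_WAIT,          /* nb_clusters == 0, depends_on != NULL */
    ALLOC_NEWLY_ALLOCATED,    /* nb_clusters  > 0                     */
} AllocOutcome;

static AllocOutcome classify_alloc_result(const QCowL2Meta *m)
{
    if (m->nb_clusters != 0) {
        /* Newly allocated: m->cluster_offset, m->nb_available etc. describe
         * the first allocated cluster, and the L2 table must be updated
         * later via qcow2_alloc_cluster_link_l2(). */
        return ALLOC_NEWLY_ALLOCATED;
    }
    if (m->depends_on != NULL) {
        /* Overlaps an allocation still in flight: queue the request on
         * m->depends_on->dependent_requests and retry, as qcow_aio_write_cb
         * does above. */
        return ALLOC_MUST_WAIT;
    }
    /* Cluster already existed: no L2 update is needed and the write can go
     * straight to m->cluster_offset, which is still filled in at "out:". */
    return ALLOC_ALREADY_ALLOCATED;
}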