Commit 03396148 authored by Michael Tokarev

allow qemu_iovec_from_buffer() to specify offset from which to start copying

Similar to
 qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
                   int c, size_t bytes);
the new prototype is:
 qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset,
                     const void *buf, size_t bytes);

Processing starts at `offset' bytes within the qiov.

This way, we may copy a bounce buffer directly into the
middle of a qiov.
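
As an illustration (not part of the patch), a minimal sketch of a caller
that scatters a bounce buffer into the middle of a request; the wrapper
name fill_guest_request and its arguments are hypothetical, only
qemu_iovec_from_buf() comes from this change:

 #include "qemu-common.h"

 /* Hypothetical helper: place len bytes of bounce data at byte offset
  * bytes_done of the guest's scatter/gather list.  With the new offset
  * argument no temporary QEMUIOVector is needed. */
 static void fill_guest_request(QEMUIOVector *qiov, size_t bytes_done,
                                const uint8_t *bounce, size_t len)
 {
     qemu_iovec_from_buf(qiov, bytes_done, bounce, len);
 }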

This is exactly the same function as iov_from_buf() from
iov.c, so use the existing implementation and rename
qemu_iovec_from_buffer() to the shorter qemu_iovec_from_buf(),
matching the utility function.

As with the utility implementation, we now assert that the
offset is within the actual iovec.  Nothing changes for
current callers, because the `offset' parameter is new.
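
Roughly, the offset handling behaves like the following simplified
sketch; this is an approximation for illustration, not the literal
iov.c code, and from_buf_sketch is a made-up name:

 #include <assert.h>
 #include <string.h>
 #include <sys/uio.h>

 /* Skip whole elements covered by offset, copy into the remainder, and
  * assert that offset did not run past the end of the iovec. */
 static size_t from_buf_sketch(struct iovec *iov, unsigned int iov_cnt,
                               size_t offset, const void *buf, size_t bytes)
 {
     size_t done = 0;
     unsigned int i;

     for (i = 0; (offset || done < bytes) && i < iov_cnt; i++) {
         if (offset < iov[i].iov_len) {
             size_t len = iov[i].iov_len - offset;
             if (len > bytes - done) {
                 len = bytes - done;
             }
             memcpy((char *)iov[i].iov_base + offset,
                    (const char *)buf + done, len);
             done += len;
             offset = 0;                  /* later elements start at 0 */
         } else {
             offset -= iov[i].iov_len;    /* element lies before offset */
         }
     }
     assert(offset == 0);                 /* offset was inside the iovec */
     return done;
 }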

While at it, stop using a "bounce qiov" in block/qcow2.c
and copy decrypted data directly from cluster_data
instead of recreating a temporary qiov for that.
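
Condensed from the block/qcow2.c hunk below, the decrypt path goes from
three calls on a temporary qiov to one direct scatter into the caller's
qiov (all identifiers are the ones used in that hunk):

 /* Before: mirror the relevant slice of the caller's qiov into hd_qiov,
  * then copy the decrypted bytes into it. */
 qemu_iovec_reset(&hd_qiov);
 qemu_iovec_copy(&hd_qiov, qiov, bytes_done, cur_nr_sectors * 512);
 qemu_iovec_from_buffer(&hd_qiov, cluster_data, 512 * cur_nr_sectors);

 /* After: copy straight from cluster_data, starting at the request's
  * current byte offset. */
 qemu_iovec_from_buf(qiov, bytes_done, cluster_data, 512 * cur_nr_sectors);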
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
Parent 3d9b4925
@@ -1821,8 +1821,8 @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
     }
 
     skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
-    qemu_iovec_from_buffer(qiov, bounce_buffer + skip_bytes,
-                           nb_sectors * BDRV_SECTOR_SIZE);
+    qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
+                        nb_sectors * BDRV_SECTOR_SIZE);
 
 err:
     qemu_vfree(bounce_buffer);
@@ -3382,7 +3382,7 @@ static void bdrv_aio_bh_cb(void *opaque)
     BlockDriverAIOCBSync *acb = opaque;
 
     if (!acb->is_write)
-        qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
+        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
     qemu_vfree(acb->bounce);
     acb->common.cb(acb->common.opaque, acb->ret);
     qemu_bh_delete(acb->bh);
......
@@ -140,8 +140,8 @@ static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
             continue;
 
         if ((s->buf_off >= acb->end)) {
-            qemu_iovec_from_buffer(acb->qiov, s->orig_buf + acb->start,
-                                   acb->end - acb->start);
+            qemu_iovec_from_buf(acb->qiov, 0, s->orig_buf + acb->start,
+                                acb->end - acb->start);
             acb->common.cb(acb->common.opaque, 0);
             qemu_aio_release(acb);
             s->acb[i] = NULL;
@@ -176,7 +176,7 @@ static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len,
     {
         char *buf = state->orig_buf + (start - state->buf_start);
 
-        qemu_iovec_from_buffer(acb->qiov, buf, len);
+        qemu_iovec_from_buf(acb->qiov, 0, buf, len);
         acb->common.cb(acb->common.opaque, 0);
 
         return FIND_RET_OK;
......
@@ -540,7 +540,7 @@ done:
     qemu_co_mutex_unlock(&s->lock);
 
     if (qiov->niov > 1) {
-        qemu_iovec_from_buffer(qiov, orig_buf, qiov->size);
+        qemu_iovec_from_buf(qiov, 0, orig_buf, qiov->size);
         qemu_vfree(orig_buf);
     }
......
@@ -590,7 +590,7 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
                 goto fail;
             }
 
-            qemu_iovec_from_buffer(&hd_qiov,
+            qemu_iovec_from_buf(&hd_qiov, 0,
                 s->cluster_cache + index_in_cluster * 512,
                 512 * cur_nr_sectors);
             break;
@@ -630,11 +630,8 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
             if (s->crypt_method) {
                 qcow2_encrypt_sectors(s, sector_num, cluster_data,
                     cluster_data, cur_nr_sectors, 0, &s->aes_decrypt_key);
-                qemu_iovec_reset(&hd_qiov);
-                qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
-                    cur_nr_sectors * 512);
-                qemu_iovec_from_buffer(&hd_qiov, cluster_data,
-                    512 * cur_nr_sectors);
+                qemu_iovec_from_buf(qiov, bytes_done,
+                        cluster_data, 512 * cur_nr_sectors);
             }
             break;
......
@@ -620,7 +620,7 @@ static void rbd_aio_bh_cb(void *opaque)
     RBDAIOCB *acb = opaque;
 
     if (acb->cmd == RBD_AIO_READ) {
-        qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
+        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
     }
     qemu_vfree(acb->bounce);
     acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
......
@@ -245,20 +245,10 @@ void qemu_iovec_to_buffer(QEMUIOVector *qiov, void *buf)
     }
 }
 
-void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count)
+size_t qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset,
+                           const void *buf, size_t bytes)
 {
-    const uint8_t *p = (const uint8_t *)buf;
-    size_t copy;
-    int i;
-
-    for (i = 0; i < qiov->niov && count; ++i) {
-        copy = count;
-        if (copy > qiov->iov[i].iov_len)
-            copy = qiov->iov[i].iov_len;
-        memcpy(qiov->iov[i].iov_base, p, copy);
-        p += copy;
-        count -= copy;
-    }
+    return iov_from_buf(qiov->iov, qiov->niov, offset, buf, bytes);
 }
 
 size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
......
@@ -346,7 +346,8 @@ void qemu_iovec_concat(QEMUIOVector *dst, QEMUIOVector *src, size_t size);
 void qemu_iovec_destroy(QEMUIOVector *qiov);
 void qemu_iovec_reset(QEMUIOVector *qiov);
 void qemu_iovec_to_buffer(QEMUIOVector *qiov, void *buf);
-void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count);
+size_t qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset,
+                           const void *buf, size_t bytes);
 size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
                          int fillc, size_t bytes);
......