Commit 03f5d60b authored by Eric Blake, committed by Kevin Wolf

backup: Switch backup_do_cow() to byte-based

We are gradually converting to byte-based interfaces, as they are
easier to reason about than sector-based.  Convert another internal
function (no semantic change).
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Parent f6ac2078
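The "no semantic change" claim boils down to the old sector/cluster arithmetic and the new byte-based alignment selecting the same cluster-aligned copy-on-write range. Below is a minimal standalone sketch of that equivalence (my own illustration, not part of the patch: the macros mirror QEMU's definitions but are re-declared locally, and the cluster size and request values are hypothetical):

/*
 * Standalone sketch (not from the patch): verify that the old
 * sector/cluster rounding and the new byte-based alignment cover
 * the same copy-on-write range.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BDRV_SECTOR_SIZE      512
#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m))
#define QEMU_ALIGN_UP(n, m)   QEMU_ALIGN_DOWN((n) + (m) - 1, (m))
#define DIV_ROUND_UP(n, d)    (((n) + (d) - 1) / (d))

int main(void)
{
    int64_t cluster_size = 65536;            /* hypothetical job->cluster_size */
    int64_t sectors_per_cluster = cluster_size / BDRV_SECTOR_SIZE;

    /* Old, sector-based request ... */
    int64_t sector_num = 300;
    int nb_sectors = 20;
    /* ... and its byte-based equivalent used by the new interface. */
    int64_t offset = sector_num * BDRV_SECTOR_SIZE;
    uint64_t bytes = (uint64_t)nb_sectors * BDRV_SECTOR_SIZE;

    /* Old code: round to cluster indices, then scale back to bytes. */
    int64_t old_start = sector_num / sectors_per_cluster * cluster_size;
    int64_t old_end = DIV_ROUND_UP(sector_num + nb_sectors,
                                   sectors_per_cluster) * cluster_size;

    /* New code: align the byte offsets directly to the cluster size. */
    int64_t new_start = QEMU_ALIGN_DOWN(offset, cluster_size);
    int64_t new_end = QEMU_ALIGN_UP(offset + bytes, cluster_size);

    assert(old_start == new_start && old_end == new_end);
    printf("COW range: [%" PRId64 ", %" PRId64 ")\n", new_start, new_end);
    return 0;
}

As far as I can tell the assertion holds for any sector-aligned request, since dividing by sectors_per_cluster and multiplying back by cluster_size is the same operation as aligning the byte offset to a cluster boundary.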
@@ -91,7 +91,7 @@ static void cow_request_end(CowRequest *req)
 }
 
 static int coroutine_fn backup_do_cow(BackupBlockJob *job,
-                                      int64_t sector_num, int nb_sectors,
+                                      int64_t offset, uint64_t bytes,
                                       bool *error_is_read,
                                       bool is_write_notifier)
 {
@@ -101,34 +101,28 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     QEMUIOVector bounce_qiov;
     void *bounce_buffer = NULL;
     int ret = 0;
-    int64_t sectors_per_cluster = cluster_size_sectors(job);
-    int64_t start, end; /* clusters */
+    int64_t start, end; /* bytes */
     int n; /* bytes */
 
     qemu_co_rwlock_rdlock(&job->flush_rwlock);
 
-    start = sector_num / sectors_per_cluster;
-    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
+    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
+    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);
 
-    trace_backup_do_cow_enter(job, start * job->cluster_size,
-                              sector_num * BDRV_SECTOR_SIZE,
-                              nb_sectors * BDRV_SECTOR_SIZE);
+    trace_backup_do_cow_enter(job, start, offset, bytes);
 
-    wait_for_overlapping_requests(job, start * job->cluster_size,
-                                  end * job->cluster_size);
-    cow_request_begin(&cow_request, job, start * job->cluster_size,
-                      end * job->cluster_size);
+    wait_for_overlapping_requests(job, start, end);
+    cow_request_begin(&cow_request, job, start, end);
 
-    for (; start < end; start++) {
-        if (test_bit(start, job->done_bitmap)) {
-            trace_backup_do_cow_skip(job, start * job->cluster_size);
+    for (; start < end; start += job->cluster_size) {
+        if (test_bit(start / job->cluster_size, job->done_bitmap)) {
+            trace_backup_do_cow_skip(job, start);
             continue; /* already copied */
         }
 
-        trace_backup_do_cow_process(job, start * job->cluster_size);
+        trace_backup_do_cow_process(job, start);
 
-        n = MIN(job->cluster_size,
-                job->common.len - start * job->cluster_size);
+        n = MIN(job->cluster_size, job->common.len - start);
 
         if (!bounce_buffer) {
             bounce_buffer = blk_blockalign(blk, job->cluster_size);
@@ -137,11 +131,10 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
         iov.iov_len = n;
         qemu_iovec_init_external(&bounce_qiov, &iov, 1);
 
-        ret = blk_co_preadv(blk, start * job->cluster_size,
-                            bounce_qiov.size, &bounce_qiov,
+        ret = blk_co_preadv(blk, start, bounce_qiov.size, &bounce_qiov,
                             is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
         if (ret < 0) {
-            trace_backup_do_cow_read_fail(job, start * job->cluster_size, ret);
+            trace_backup_do_cow_read_fail(job, start, ret);
             if (error_is_read) {
                 *error_is_read = true;
             }
@@ -149,22 +142,22 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
         }
 
         if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
-            ret = blk_co_pwrite_zeroes(job->target, start * job->cluster_size,
+            ret = blk_co_pwrite_zeroes(job->target, start,
                                        bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
         } else {
-            ret = blk_co_pwritev(job->target, start * job->cluster_size,
+            ret = blk_co_pwritev(job->target, start,
                                  bounce_qiov.size, &bounce_qiov,
                                  job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
         }
         if (ret < 0) {
-            trace_backup_do_cow_write_fail(job, start * job->cluster_size, ret);
+            trace_backup_do_cow_write_fail(job, start, ret);
            if (error_is_read) {
                 *error_is_read = false;
             }
             goto out;
         }
 
-        set_bit(start, job->done_bitmap);
+        set_bit(start / job->cluster_size, job->done_bitmap);
 
         /* Publish progress, guest I/O counts as progress too.  Note that the
          * offset field is an opaque progress value, it is not a disk offset.
@@ -180,8 +173,7 @@ out:
 
     cow_request_end(&cow_request);
 
-    trace_backup_do_cow_return(job, sector_num * BDRV_SECTOR_SIZE,
-                               nb_sectors * BDRV_SECTOR_SIZE, ret);
+    trace_backup_do_cow_return(job, offset, bytes, ret);
 
     qemu_co_rwlock_unlock(&job->flush_rwlock);
 
@@ -194,14 +186,12 @@ static int coroutine_fn backup_before_write_notify(
 {
     BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
     BdrvTrackedRequest *req = opaque;
-    int64_t sector_num = req->offset >> BDRV_SECTOR_BITS;
-    int nb_sectors = req->bytes >> BDRV_SECTOR_BITS;
 
     assert(req->bs == blk_bs(job->common.blk));
-    assert((req->offset & (BDRV_SECTOR_SIZE - 1)) == 0);
-    assert((req->bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+    assert(QEMU_IS_ALIGNED(req->offset, BDRV_SECTOR_SIZE));
+    assert(QEMU_IS_ALIGNED(req->bytes, BDRV_SECTOR_SIZE));
 
-    return backup_do_cow(job, sector_num, nb_sectors, NULL, true);
+    return backup_do_cow(job, req->offset, req->bytes, NULL, true);
 }
 
 static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
@@ -406,8 +396,8 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
             if (yield_and_check(job)) {
                 goto out;
             }
-            ret = backup_do_cow(job, cluster * sectors_per_cluster,
-                                sectors_per_cluster, &error_is_read,
+            ret = backup_do_cow(job, cluster * job->cluster_size,
+                                job->cluster_size, &error_is_read,
                                 false);
             if ((ret < 0) &&
                 backup_error_action(job, error_is_read, -ret) ==
@@ -509,8 +499,8 @@ static void coroutine_fn backup_run(void *opaque)
             if (alloced < 0) {
                 ret = alloced;
             } else {
-                ret = backup_do_cow(job, start * sectors_per_cluster,
-                                    sectors_per_cluster, &error_is_read,
+                ret = backup_do_cow(job, start * job->cluster_size,
+                                    job->cluster_size, &error_is_read,
                                     false);
             }
             if (ret < 0) {