Commit e8f263e0 authored by Jiri Denemark

qemu: Cancel disk mirrors after libvirtd restart

When libvirtd is restarted during migration, we properly cancel the
ongoing migration (unless it managed to almost finish before the
restart). But if we were also migrating storage using NBD, we would
completely forget about the running disk mirrors.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Parent: 3a18bd2d
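For context: the patch persists a per-disk migrating flag in the domain status XML (qemuDomainObjPrivateXMLFormat below), restores it when libvirtd restarts (qemuDomainObjPrivateXMLParse), and adds qemuMigrationCancel to tear down any leftover drive mirrors. Based on the formatting code, the <job> element in the status XML would look roughly like the sketch below; the type/async/phase attribute values and disk targets are illustrative, only the <disk> children's shape is taken verbatim from the patch:

    <job type='none' async='migration out' phase='perform3'>
      <disk dev='vda' migrating='yes'/>
      <disk dev='vdb' migrating='no'/>
    </job>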
src/qemu/qemu_domain.c

@@ -578,7 +578,27 @@ qemuDomainObjPrivateXMLFormat(virBufferPtr buf,
                               qemuDomainAsyncJobPhaseToString(
                                     priv->job.asyncJob, priv->job.phase));
         }
-        virBufferAddLit(buf, "/>\n");
+        if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+            virBufferAddLit(buf, "/>\n");
+        } else {
+            size_t i;
+            virDomainDiskDefPtr disk;
+            qemuDomainDiskPrivatePtr diskPriv;
+
+            virBufferAddLit(buf, ">\n");
+            virBufferAdjustIndent(buf, 2);
+
+            for (i = 0; i < vm->def->ndisks; i++) {
+                disk = vm->def->disks[i];
+                diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
+                virBufferAsprintf(buf, "<disk dev='%s' migrating='%s'/>\n",
+                                  disk->dst,
+                                  diskPriv->migrating ? "yes" : "no");
+            }
+
+            virBufferAdjustIndent(buf, -2);
+            virBufferAddLit(buf, "</job>\n");
+        }
     }

     priv->job.active = job;
@@ -736,6 +756,29 @@ qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt,
         }
     }

+    if ((n = virXPathNodeSet("./job[1]/disk[@migrating='yes']",
+                             ctxt, &nodes)) < 0) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                       _("failed to parse list of disks marked for migration"));
+        goto error;
+    }
+    if (n > 0) {
+        if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+            VIR_WARN("Found disks marked for migration but we were not "
+                     "migrating");
+            n = 0;
+        }
+        for (i = 0; i < n; i++) {
+            char *dst = virXMLPropString(nodes[i], "dev");
+            virDomainDiskDefPtr disk;
+
+            if (dst && (disk = virDomainDiskByName(vm->def, dst, false)))
+                QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating = true;
+            VIR_FREE(dst);
+        }
+    }
+    VIR_FREE(nodes);
+
     priv->fakeReboot = virXPathBoolean("boolean(./fakereboot)", ctxt) == 1;

     if ((n = virXPathNodeSet("./devices/device", ctxt, &nodes)) < 0) {
src/qemu/qemu_migration.c

@@ -2026,6 +2026,7 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
     char *hoststr = NULL;
     unsigned int mirror_flags = VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT;
     int rv;
+    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

     VIR_DEBUG("Starting drive mirrors for domain %s", vm->def->name);
...@@ -2075,6 +2076,11 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver, ...@@ -2075,6 +2076,11 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
goto cleanup; goto cleanup;
} }
diskPriv->migrating = true; diskPriv->migrating = true;
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) {
VIR_WARN("Failed to save status on vm %s", vm->def->name);
goto cleanup;
}
} }
while ((rv = qemuMigrationDriveMirrorReady(driver, vm)) != 1) { while ((rv = qemuMigrationDriveMirrorReady(driver, vm)) != 1) {
@@ -2102,6 +2108,7 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
     ret = 0;

 cleanup:
+    virObjectUnref(cfg);
     VIR_FREE(diskAlias);
     VIR_FREE(nbd_dest);
     VIR_FREE(hoststr);
@@ -5817,6 +5824,84 @@ qemuMigrationToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
     return ret;
 }

+
+int
+qemuMigrationCancel(virQEMUDriverPtr driver,
+                    virDomainObjPtr vm)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    virHashTablePtr blockJobs = NULL;
+    bool storage = false;
+    size_t i;
+    int ret = -1;
+
+    VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
+              vm->def->name);
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDefPtr disk = vm->def->disks[i];
+
+        if (QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating) {
+            qemuBlockJobSyncBegin(disk);
+            storage = true;
+        }
+    }
+
+    qemuDomainObjEnterMonitor(driver, vm);
+
+    ignore_value(qemuMonitorMigrateCancel(priv->mon));
+    if (storage)
+        blockJobs = qemuMonitorGetAllBlockJobInfo(priv->mon);
+
+    if (qemuDomainObjExitMonitor(driver, vm) < 0 || (storage && !blockJobs))
+        goto endsyncjob;
+
+    if (!storage) {
+        ret = 0;
+        goto cleanup;
+    }
+
+    for (i = 0; i < vm->def->ndisks; i++) {
+        virDomainDiskDefPtr disk = vm->def->disks[i];
+        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
+
+        if (!diskPriv->migrating)
+            continue;
+
+        if (virHashLookup(blockJobs, disk->info.alias)) {
+            VIR_DEBUG("Drive mirror on disk %s is still running", disk->dst);
+        } else {
+            VIR_DEBUG("Drive mirror on disk %s is gone", disk->dst);
+            qemuBlockJobSyncEnd(driver, vm, disk);
+            diskPriv->migrating = false;
+        }
+    }
+
+    if (qemuMigrationCancelDriveMirror(driver, vm, false,
+                                       QEMU_ASYNC_JOB_NONE) < 0)
+        goto endsyncjob;
+
+    ret = 0;
+
+ cleanup:
+    virHashFree(blockJobs);
+    return ret;
+
+ endsyncjob:
+    if (storage) {
+        for (i = 0; i < vm->def->ndisks; i++) {
+            virDomainDiskDefPtr disk = vm->def->disks[i];
+            qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
+
+            if (diskPriv->migrating) {
+                qemuBlockJobSyncEnd(driver, vm, disk);
+                diskPriv->migrating = false;
+            }
+        }
+    }
+    goto cleanup;
+}
+
+
 int
 qemuMigrationJobStart(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
src/qemu/qemu_migration.h

@@ -185,4 +185,7 @@ int qemuMigrationToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_NONNULL(5)
     ATTRIBUTE_RETURN_CHECK;

+int qemuMigrationCancel(virQEMUDriverPtr driver,
+                        virDomainObjPtr vm);
+
 #endif /* __QEMU_MIGRATION_H__ */
src/qemu/qemu_process.c

@@ -3354,8 +3354,6 @@ qemuProcessRecoverMigration(virQEMUDriverPtr driver,
                             virDomainState state,
                             int reason)
 {
-    qemuDomainObjPrivatePtr priv = vm->privateData;
-
     if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
         switch (phase) {
         case QEMU_MIGRATION_PHASE_NONE:

@@ -3409,11 +3407,7 @@ qemuProcessRecoverMigration(virQEMUDriverPtr driver,
         case QEMU_MIGRATION_PHASE_PERFORM3:
             /* migration is still in progress, let's cancel it and resume the
              * domain */
-            VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
-                      vm->def->name);
-            qemuDomainObjEnterMonitor(driver, vm);
-            ignore_value(qemuMonitorMigrateCancel(priv->mon));
-            if (qemuDomainObjExitMonitor(driver, vm) < 0)
+            if (qemuMigrationCancel(driver, vm) < 0)
                 return -1;
             /* resume the domain but only if it was paused as a result of
              * migration */