Commit 8b9bf787 authored by liguang, committed by Jiri Denemark

Add support for offline migration

Offline migration transfers the inactive definition of a domain (which
may or may not be active). After successful completion, the domain
remains in its current state on the source host and is defined but
inactive on the destination host. It is a bit more clever than
virDomainGetXMLDesc() on the source host followed by virDomainDefineXML()
on the destination host, because offline migration runs the pre-migration
hook to update the domain XML on the destination host. Currently, copying
non-shared storage is not supported during offline migration.

Offline migration can be requested with a new migration flag called
VIR_MIGRATE_OFFLINE (which has to be combined with the
VIR_MIGRATE_PERSIST_DEST flag).
Parent e5577872
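For context, a caller requests offline migration by combining the new flag with VIR_MIGRATE_PERSIST_DEST in an ordinary virDomainMigrate() call. A minimal sketch follows; the connection URIs and the domain name "guest1" are placeholders, and error handling is abbreviated:

    #include <stdio.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        int ret = 1;
        /* Placeholder URIs and domain name -- adjust for the hosts involved. */
        virConnectPtr src = virConnectOpen("qemu:///system");
        virConnectPtr dst = virConnectOpen("qemu+ssh://dst.example.com/system");
        virDomainPtr dom = NULL;
        virDomainPtr migrated = NULL;

        if (!src || !dst)
            goto cleanup;
        if (!(dom = virDomainLookupByName(src, "guest1")))
            goto cleanup;

        /* VIR_MIGRATE_OFFLINE only transfers the inactive definition, so it
         * must be combined with VIR_MIGRATE_PERSIST_DEST. */
        migrated = virDomainMigrate(dom, dst,
                                    VIR_MIGRATE_OFFLINE | VIR_MIGRATE_PERSIST_DEST,
                                    NULL /* dname */, NULL /* uri */, 0 /* bandwidth */);
        if (migrated)
            ret = 0;
        else
            fprintf(stderr, "offline migration failed\n");

    cleanup:
        if (migrated)
            virDomainFree(migrated);
        if (dom)
            virDomainFree(dom);
        if (dst)
            virConnectClose(dst);
        if (src)
            virConnectClose(src);
        return ret;
    }

On success the returned virDomainPtr refers to the persistent but inactive definition on the destination; no qemu process is started on either side.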
@@ -1092,6 +1092,7 @@ typedef enum {
                                   * whole migration process; this will be used automatically
                                   * when supported */
     VIR_MIGRATE_UNSAFE            = (1 << 9), /* force migration even if it is considered unsafe */
+    VIR_MIGRATE_OFFLINE           = (1 << 10), /* offline migrate */
 } virDomainMigrateFlags;

 /* Domain migration. */
...
@@ -4829,6 +4829,14 @@ virDomainMigrateVersion3(virDomainPtr domain,
     if (uri_out)
         uri = uri_out; /* Did domainMigratePrepare3 change URI? */

+    if (flags & VIR_MIGRATE_OFFLINE) {
+        VIR_DEBUG("Offline migration, skipping Perform phase");
+        VIR_FREE(cookieout);
+        cookieoutlen = 0;
+        cancelled = 0;
+        goto finish;
+    }
+
     /* Perform the migration. The driver isn't supposed to return
      * until the migration is complete. The src VM should remain
      * running, but in paused state until the destination can
@@ -5199,6 +5207,23 @@ virDomainMigrate(virDomainPtr domain,
         goto error;
     }

+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (!VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the source host"));
+            goto error;
+        }
+        if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the destination host"));
+            goto error;
+        }
+    }
+
     if (flags & VIR_MIGRATE_PEER2PEER) {
         if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
                                      VIR_DRV_FEATURE_MIGRATION_P2P)) {
@@ -5404,6 +5429,23 @@ virDomainMigrate2(virDomainPtr domain,
         goto error;
     }

+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (!VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the source host"));
+            goto error;
+        }
+        if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the destination host"));
+            goto error;
+        }
+    }
+
     if (flags & VIR_MIGRATE_PEER2PEER) {
         if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
                                      VIR_DRV_FEATURE_MIGRATION_P2P)) {
@@ -5581,6 +5623,15 @@ virDomainMigrateToURI(virDomainPtr domain,
     virCheckNonNullArgGoto(duri, error);

+    if (flags & VIR_MIGRATE_OFFLINE &&
+        !VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+                                  VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+        virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                        _("offline migration is not supported by "
+                          "the source host"));
+        goto error;
+    }
+
     if (flags & VIR_MIGRATE_PEER2PEER) {
         if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
                                      VIR_DRV_FEATURE_MIGRATION_P2P)) {
...
@@ -105,6 +105,11 @@ enum {
      * Support for VIR_DOMAIN_XML_MIGRATABLE flag in domainGetXMLDesc
      */
     VIR_DRV_FEATURE_XML_MIGRATABLE = 11,
+
+    /*
+     * Support for offline migration.
+     */
+    VIR_DRV_FEATURE_MIGRATION_OFFLINE = 12,
 };
...
@@ -1208,6 +1208,7 @@ qemuSupportsFeature(virConnectPtr conn ATTRIBUTE_UNUSED, int feature)
     case VIR_DRV_FEATURE_FD_PASSING:
     case VIR_DRV_FEATURE_TYPED_PARAM_STRING:
     case VIR_DRV_FEATURE_XML_MIGRATABLE:
+    case VIR_DRV_FEATURE_MIGRATION_OFFLINE:
         return 1;
     default:
         return 0;
@@ -9698,7 +9699,7 @@ qemuDomainMigratePrepareTunnel(virConnectPtr dconn,

     ret = qemuMigrationPrepareTunnel(driver, dconn,
                                      NULL, 0, NULL, NULL, /* No cookies in v2 */
-                                     st, dname, dom_xml);
+                                     st, dname, dom_xml, flags);

 cleanup:
     qemuDriverUnlock(driver);
@@ -9758,7 +9759,7 @@ qemuDomainMigratePrepare2(virConnectPtr dconn,
     ret = qemuMigrationPrepareDirect(driver, dconn,
                                      NULL, 0, NULL, NULL, /* No cookies */
                                      uri_in, uri_out,
-                                     dname, dom_xml);
+                                     dname, dom_xml, flags);

 cleanup:
     qemuDriverUnlock(driver);
@@ -9900,7 +9901,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
         asyncJob = QEMU_ASYNC_JOB_NONE;
     }

-    if (!virDomainObjIsActive(vm)) {
+    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
         goto endjob;
@@ -9909,8 +9910,8 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
     /* Check if there is any ejected media.
      * We don't want to require them on the destination.
      */
-    if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
+    if (!(flags & VIR_MIGRATE_OFFLINE) &&
+        qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
         goto endjob;

     if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname,
@@ -9995,7 +9996,7 @@ qemuDomainMigratePrepare3(virConnectPtr dconn,
                                      cookiein, cookieinlen,
                                      cookieout, cookieoutlen,
                                      uri_in, uri_out,
-                                     dname, dom_xml);
+                                     dname, dom_xml, flags);

 cleanup:
     qemuDriverUnlock(driver);
@@ -10040,7 +10041,7 @@ qemuDomainMigratePrepareTunnel3(virConnectPtr dconn,
     ret = qemuMigrationPrepareTunnel(driver, dconn,
                                      cookiein, cookieinlen,
                                      cookieout, cookieoutlen,
-                                     st, dname, dom_xml);
+                                     st, dname, dom_xml, flags);
     qemuDriverUnlock(driver);

 cleanup:
...
@@ -1442,6 +1442,28 @@ char *qemuMigrationBegin(virQEMUDriverPtr driver,
                                 QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0)
         goto cleanup;

+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
+                     VIR_MIGRATE_NON_SHARED_INC)) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("offline migration cannot handle "
+                             "non-shared storage"));
+            goto cleanup;
+        }
+        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("offline migration must be specified with "
+                             "the persistent flag set"));
+            goto cleanup;
+        }
+        if (flags & VIR_MIGRATE_TUNNELLED) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("tunnelled offline migration does not "
+                             "make sense"));
+            goto cleanup;
+        }
+    }
+
     if (xmlin) {
         if (!(def = virDomainDefParseString(driver->caps, xmlin,
                                             QEMU_EXPECTED_VIRT_TYPES,
@@ -1499,7 +1521,8 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
                         const char *dname,
                         const char *dom_xml,
                         const char *migrateFrom,
-                        virStreamPtr st)
+                        virStreamPtr st,
+                        unsigned long flags)
 {
     virDomainDefPtr def = NULL;
     virDomainObjPtr vm = NULL;
@@ -1512,10 +1535,33 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
     bool tunnel = !!st;
     char *origname = NULL;
     char *xmlout = NULL;
+    unsigned int cookieFlags;

     if (virTimeMillisNow(&now) < 0)
         return -1;

+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
+                     VIR_MIGRATE_NON_SHARED_INC)) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("offline migration cannot handle "
+                             "non-shared storage"));
+            goto cleanup;
+        }
+        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("offline migration must be specified with "
+                             "the persistent flag set"));
+            goto cleanup;
+        }
+        if (tunnel) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("tunnelled offline migration does not "
+                             "make sense"));
+            goto cleanup;
+        }
+    }
+
     if (!(def = virDomainDefParseString(driver->caps, dom_xml,
                                         QEMU_EXPECTED_VIRT_TYPES,
                                         VIR_DOMAIN_XML_INACTIVE)))
@@ -1599,6 +1645,9 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
     /* Domain starts inactive, even if the domain XML had an id field. */
     vm->def->id = -1;

+    if (flags & VIR_MIGRATE_OFFLINE)
+        goto done;
+
     if (tunnel &&
         (pipe(dataFD) < 0 || virSetCloseExec(dataFD[1]) < 0)) {
         virReportSystemError(errno, "%s",
@@ -1640,8 +1689,14 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
         VIR_DEBUG("Received no lockstate");
     }

+done:
+    if (flags & VIR_MIGRATE_OFFLINE)
+        cookieFlags = 0;
+    else
+        cookieFlags = QEMU_MIGRATION_COOKIE_GRAPHICS;
+
     if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
-                                QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) {
+                                cookieFlags) < 0) {
         /* We could tear down the whole guest here, but
          * cookie data is (so far) non-critical, so that
          * seems a little harsh. We'll just warn for now.
@@ -1652,10 +1707,12 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
     if (qemuDomainCleanupAdd(vm, qemuMigrationPrepareCleanup) < 0)
         goto endjob;

-    virDomainAuditStart(vm, "migrated", true);
-    event = virDomainEventNewFromObj(vm,
-                                     VIR_DOMAIN_EVENT_STARTED,
-                                     VIR_DOMAIN_EVENT_STARTED_MIGRATED);
+    if (!(flags & VIR_MIGRATE_OFFLINE)) {
+        virDomainAuditStart(vm, "migrated", true);
+        event = virDomainEventNewFromObj(vm,
+                                         VIR_DOMAIN_EVENT_STARTED,
+                                         VIR_DOMAIN_EVENT_STARTED_MIGRATED);
+    }

     /* We keep the job active across API calls until the finish() call.
      * This prevents any other APIs being invoked while incoming
@@ -1708,7 +1765,8 @@ qemuMigrationPrepareTunnel(virQEMUDriverPtr driver,
                            int *cookieoutlen,
                            virStreamPtr st,
                            const char *dname,
-                           const char *dom_xml)
+                           const char *dom_xml,
+                           unsigned long flags)
 {
     int ret;

@@ -1722,7 +1780,7 @@ qemuMigrationPrepareTunnel(virQEMUDriverPtr driver,
      */
     ret = qemuMigrationPrepareAny(driver, dconn, cookiein, cookieinlen,
                                   cookieout, cookieoutlen, dname, dom_xml,
-                                  "stdio", st);
+                                  "stdio", st, flags);
     return ret;
 }

@@ -1737,7 +1795,8 @@ qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
                            const char *uri_in,
                            char **uri_out,
                            const char *dname,
-                           const char *dom_xml)
+                           const char *dom_xml,
+                           unsigned long flags)
 {
     static int port = 0;
     int this_port;
@@ -1833,7 +1892,7 @@ qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
     ret = qemuMigrationPrepareAny(driver, dconn, cookiein, cookieinlen,
                                   cookieout, cookieoutlen, dname, dom_xml,
-                                  migrateFrom, NULL);
+                                  migrateFrom, NULL, flags);

 cleanup:
     VIR_FREE(hostname);
     if (ret != 0)
@@ -2679,6 +2738,14 @@ static int doPeer2PeerMigrate3(virQEMUDriverPtr driver,
     if (ret == -1)
         goto cleanup;

+    if (flags & VIR_MIGRATE_OFFLINE) {
+        VIR_DEBUG("Offline migration, skipping Perform phase");
+        VIR_FREE(cookieout);
+        cookieoutlen = 0;
+        cancelled = 0;
+        goto finish;
+    }
+
     if (!(flags & VIR_MIGRATE_TUNNELLED) &&
         (uri_out == NULL)) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
@@ -2817,6 +2884,7 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
     virConnectPtr dconn = NULL;
     bool p2p;
     virErrorPtr orig_err = NULL;
+    bool offline;

     VIR_DEBUG("driver=%p, sconn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
               "uri=%s, flags=%lx, dname=%s, resource=%lu",
@@ -2849,6 +2917,9 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
      */
     *v3proto = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                         VIR_DRV_FEATURE_MIGRATION_V3);
+    if (flags & VIR_MIGRATE_OFFLINE)
+        offline = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+                                           VIR_DRV_FEATURE_MIGRATION_OFFLINE);
     qemuDomainObjExitRemoteWithDriver(driver, vm);

     if (!p2p) {
@@ -2857,8 +2928,15 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
         goto cleanup;
     }

+    if (flags & VIR_MIGRATE_OFFLINE && !offline) {
+        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                       _("offline migration is not supported by "
+                         "the destination host"));
+        goto cleanup;
+    }
+
     /* domain may have been stopped while we were talking to remote daemon */
-    if (!virDomainObjIsActive(vm)) {
+    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("guest unexpectedly quit"));
         goto cleanup;
@@ -2921,7 +2999,7 @@ qemuMigrationPerformJob(virQEMUDriverPtr driver,
     if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
         goto cleanup;

-    if (!virDomainObjIsActive(vm)) {
+    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
         goto endjob;
@@ -3245,26 +3323,27 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
      * object, but if no, clean up the empty qemu process.
      */
     if (retcode == 0) {
-        if (!virDomainObjIsActive(vm)) {
+        if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
             virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("guest unexpectedly quit"));
             goto endjob;
         }

-        if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
-            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                            VIR_QEMU_PROCESS_STOP_MIGRATED);
-            virDomainAuditStop(vm, "failed");
-            event = virDomainEventNewFromObj(vm,
-                                             VIR_DOMAIN_EVENT_STOPPED,
-                                             VIR_DOMAIN_EVENT_STOPPED_FAILED);
-            goto endjob;
+        if (!(flags & VIR_MIGRATE_OFFLINE)) {
+            if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
+                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+                                VIR_QEMU_PROCESS_STOP_MIGRATED);
+                virDomainAuditStop(vm, "failed");
+                event = virDomainEventNewFromObj(vm,
+                                                 VIR_DOMAIN_EVENT_STOPPED,
+                                                 VIR_DOMAIN_EVENT_STOPPED_FAILED);
+                goto endjob;
+            }
+            if (mig->network)
+                if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
+                    VIR_WARN("unable to provide network data for relocation");
         }

-        if (mig->network)
-            if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
-                VIR_WARN("unable to provide network data for relocation");
-
         if (flags & VIR_MIGRATE_PERSIST_DEST) {
             virDomainDefPtr vmdef;
             if (vm->persistent)
@@ -3290,9 +3369,11 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
              * to restart during confirm() step, so we kill it off now.
              */
             if (v3proto) {
-                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                                VIR_QEMU_PROCESS_STOP_MIGRATED);
-                virDomainAuditStop(vm, "failed");
+                if (!(flags & VIR_MIGRATE_OFFLINE)) {
+                    qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+                                    VIR_QEMU_PROCESS_STOP_MIGRATED);
+                    virDomainAuditStop(vm, "failed");
+                }
                 if (newVM)
                     vm->persistent = 0;
             }
@@ -3312,7 +3393,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
             event = NULL;
         }

-        if (!(flags & VIR_MIGRATE_PAUSED)) {
+        if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) {
            /* run 'cont' on the destination, which allows migration on qemu
             * >= 0.10.6 to work properly. This isn't strictly necessary on
             * older qemu's, but it also doesn't hurt anything there
@@ -3350,25 +3431,30 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
         dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);

-        event = virDomainEventNewFromObj(vm,
-                                         VIR_DOMAIN_EVENT_RESUMED,
-                                         VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
-        if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
-            virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER);
-            if (event)
-                qemuDomainEventQueue(driver, event);
-            event = virDomainEventNewFromObj(vm,
-                                             VIR_DOMAIN_EVENT_SUSPENDED,
-                                             VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+        if (!(flags & VIR_MIGRATE_OFFLINE)) {
+            event = virDomainEventNewFromObj(vm,
+                                             VIR_DOMAIN_EVENT_RESUMED,
+                                             VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
+            if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
+                virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
+                                     VIR_DOMAIN_PAUSED_USER);
+                if (event)
+                    qemuDomainEventQueue(driver, event);
+                event = virDomainEventNewFromObj(vm,
+                                                 VIR_DOMAIN_EVENT_SUSPENDED,
+                                                 VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+            }
         }
-        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
+
+        if (virDomainObjIsActive(vm) &&
+            virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
             VIR_WARN("Failed to save status on vm %s", vm->def->name);
             goto endjob;
         }

         /* Guest is successfully running, so cancel previous auto destroy */
         qemuProcessAutoDestroyRemove(driver, vm);
-    } else {
+    } else if (!(flags & VIR_MIGRATE_OFFLINE)) {
         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                         VIR_QEMU_PROCESS_STOP_MIGRATED);
         virDomainAuditStop(vm, "failed");
@@ -3430,6 +3516,9 @@ int qemuMigrationConfirm(virQEMUDriverPtr driver,
     if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
         return -1;

+    if (flags & VIR_MIGRATE_OFFLINE)
+        goto done;
+
     /* Did the migration go as planned? If yes, kill off the
      * domain object, but if no, resume CPUs
      */
@@ -3465,6 +3554,7 @@ int qemuMigrationConfirm(virQEMUDriverPtr driver,
         }
     }

+done:
     qemuMigrationCookieFree(mig);

     rv = 0;
...
@@ -36,7 +36,8 @@
      VIR_MIGRATE_NON_SHARED_DISK |              \
      VIR_MIGRATE_NON_SHARED_INC |               \
      VIR_MIGRATE_CHANGE_PROTECTION |            \
-     VIR_MIGRATE_UNSAFE)
+     VIR_MIGRATE_UNSAFE |                       \
+     VIR_MIGRATE_OFFLINE)

 enum qemuMigrationJobPhase {
     QEMU_MIGRATION_PHASE_NONE = 0,
@@ -97,7 +98,8 @@ int qemuMigrationPrepareTunnel(virQEMUDriverPtr driver,
                                int *cookieoutlen,
                                virStreamPtr st,
                                const char *dname,
-                               const char *dom_xml);
+                               const char *dom_xml,
+                               unsigned long flags);

 int qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
                                virConnectPtr dconn,
@@ -108,7 +110,8 @@ int qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
                                const char *uri_in,
                                char **uri_out,
                                const char *dname,
-                               const char *dom_xml);
+                               const char *dom_xml,
+                               unsigned long flags);

 int qemuMigrationPerform(virQEMUDriverPtr driver,
                          virConnectPtr conn,
...
@@ -6803,6 +6803,7 @@ static const vshCmdInfo info_migrate[] = {
 static const vshCmdOptDef opts_migrate[] = {
     {"live", VSH_OT_BOOL, 0, N_("live migration")},
+    {"offline", VSH_OT_BOOL, 0, N_("offline migration")},
     {"p2p", VSH_OT_BOOL, 0, N_("peer-2-peer migration")},
     {"direct", VSH_OT_BOOL, 0, N_("direct migration")},
     {"tunneled", VSH_OT_ALIAS, 0, "tunnelled"},
@@ -6888,6 +6889,10 @@ doMigrate(void *opaque)
     if (vshCommandOptBool(cmd, "unsafe"))
         flags |= VIR_MIGRATE_UNSAFE;

+    if (vshCommandOptBool(cmd, "offline")) {
+        flags |= VIR_MIGRATE_OFFLINE;
+    }
+
     if (xmlfile &&
         virFileReadAll(xmlfile, 8192, &xml) < 0) {
         vshError(ctl, _("file '%s' doesn't exist"), xmlfile);
...
@@ -1040,15 +1040,18 @@ I<--total> for only the total stats, I<start> for only the per-cpu
 stats of the CPUs from I<start>, I<count> for only I<count> CPUs'
 stats.

-=item B<migrate> [I<--live>] [I<--direct>] [I<--p2p> [I<--tunnelled>]]
+=item B<migrate> [I<--live>] [I<--offline>] [I<--direct>] [I<--p2p> [I<--tunnelled>]]
 [I<--persistent>] [I<--undefinesource>] [I<--suspend>] [I<--copy-storage-all>]
 [I<--copy-storage-inc>] [I<--change-protection>] [I<--unsafe>] [I<--verbose>]
 I<domain> I<desturi> [I<migrateuri>] [I<dname>]
 [I<--timeout> B<seconds>] [I<--xml> B<file>]

 Migrate domain to another host.  Add I<--live> for live migration; I<--p2p>
 for peer-2-peer migration; I<--direct> for direct migration; or I<--tunnelled>
-for tunnelled migration.  I<--persistent> leaves the domain persistent on
+for tunnelled migration.  I<--offline> migrates domain definition without
+starting the domain on destination and without stopping it on source host.
+Offline migration may be used with inactive domains and it must be used with
+I<--persistent> option.  I<--persistent> leaves the domain persistent on
 destination host, I<--undefinesource> undefines the domain on the source host,
 and I<--suspend> leaves the domain paused on the destination host.
 I<--copy-storage-all> indicates migration with non-shared storage with full
...
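From virsh, the same request would look something like virsh migrate --offline --persistent guest1 qemu+ssh://dst.example.com/system, where the domain name and destination URI are placeholders for the hosts involved. Omitting --persistent makes the qemu driver reject the request, as enforced in qemuMigrationBegin() above.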