Commit fae0da5c authored by Daniel P. Berrange

Support a new peer-to-peer migration mode & public API

Introduces several new public API options for migration

 - VIR_MIGRATE_PEER2PEER: With this flag the client only
   invokes the virDomainMigratePerform method, expecting
   the source host driver to do whatever is required to
   complete the entire migration process.
 - VIR_MIGRATE_TUNNELLED: With this flag the actual data
   for migration will be tunnelled over the libvirtd RPC
   channel. This requires that VIR_MIGRATE_PEER2PEER is
   also set.
 - virDomainMigrateToURI: This is a variant of the existing
   virDomainMigrate method which does not require any
   virConnectPtr for the destination host. Given suitable
   driver support, this allows for all the same modes as
   virDomainMigrate() (see the example sketch below).

The URI for VIR_MIGRATE_PEER2PEER must be a valid libvirt
URI. For non-p2p migration a hypervisor specific migration
URI is used.

virDomainMigrateToURI without the PEER2PEER flag is currently
only supported for Xen, and it involves XenD talking
directly to XenD, with no libvirtd involved at all.
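
As a rough usage sketch (not part of this patch; the guest name,
destination URI and error handling are illustrative placeholders),
a client requesting a live, tunnelled, peer-to-peer migration only
needs its existing connection to the source host:

  #include <stdio.h>
  #include <libvirt/libvirt.h>

  int main(void)
  {
      /* Only the source hypervisor connection is needed; with
       * VIR_MIGRATE_PEER2PEER the source libvirtd contacts the
       * destination itself. */
      virConnectPtr conn = virConnectOpen("qemu:///system");
      if (!conn)
          return 1;

      virDomainPtr dom = virDomainLookupByName(conn, "guest");
      if (!dom) {
          virConnectClose(conn);
          return 1;
      }

      /* The URI is a normal libvirt connection URI for the
       * destination host, since VIR_MIGRATE_PEER2PEER is set. */
      int rc = virDomainMigrateToURI(dom,
                                     "qemu+tls://dest.example.com/system",
                                     VIR_MIGRATE_LIVE |
                                     VIR_MIGRATE_PEER2PEER |
                                     VIR_MIGRATE_TUNNELLED,
                                     NULL, /* keep the same domain name */
                                     0);   /* no bandwidth limit */
      if (rc < 0)
          fprintf(stderr, "migration failed\n");

      virDomainFree(dom);
      virConnectClose(conn);
      return 0;
  }

Without VIR_MIGRATE_PEER2PEER the same call attempts the new
"direct" mode instead, which the source driver must advertise
via VIR_DRV_FEATURE_MIGRATION_DIRECT.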

* include/libvirt/libvirt.h.in: Add VIR_MIGRATE_PEER2PEER
  flag for migration
* src/libvirt_internal.h: Add feature flags for peer to
  peer migration (VIR_DRV_FEATURE_MIGRATION_P2P) and direct
  migration (VIR_DRV_FEATURE_MIGRATION_DIRECT)
* src/libvirt.c: Implement support for VIR_MIGRATE_PEER2PEER
  and virDomainMigrateToURI APIs.
* src/xen/xen_driver.c: Advertise support for DIRECT migration
* src/xen/xend_internal.c: Add TODO item for p2p migration
* src/libvirt_public.syms: Export virDomainMigrateToURI
  method
* src/qemu/qemu_driver.c: Add support for PEER2PEER
  migration, and adapt TUNNELLED migration.
* tools/virsh.c: Add --p2p and --direct args and use the
  new virDomainMigrateToURI method where possible.
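
For example (guest name and destination host are placeholders),
the peer-to-peer tunnelled case from virsh would look something
like:

  virsh migrate --p2p --tunnelled --live guest qemu+tls://dest.example.com/system

while omitting --p2p/--direct keeps the existing behaviour of
virsh opening a second connection to the destination host itself.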
Parent 35e7f271
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -336,8 +336,9 @@ typedef virDomainInterfaceStatsStruct *virDomainInterfaceStatsPtr;

 /* Domain migration flags. */
 typedef enum {
-    VIR_MIGRATE_LIVE      = 1, /* live migration */
-    VIR_MIGRATE_TUNNELLED = 2, /* tunnelled migration */
+    VIR_MIGRATE_LIVE      = (1 << 0), /* live migration */
+    VIR_MIGRATE_PEER2PEER = (1 << 1), /* direct source -> dest host control channel */
+    VIR_MIGRATE_TUNNELLED = (1 << 2), /* tunnel migration data over libvirtd connection */
 } virDomainMigrateFlags;

 /* Domain migration. */
@@ -345,6 +346,10 @@ virDomainPtr virDomainMigrate (virDomainPtr domain, virConnectPtr dconn,
                                unsigned long flags, const char *dname,
                                const char *uri, unsigned long bandwidth);

+int virDomainMigrateToURI (virDomainPtr domain, const char *duri,
+                           unsigned long flags, const char *dname,
+                           unsigned long bandwidth);
+
 /**
  * VIR_NODEINFO_MAXCPUS:
  * @nodeinfo: virNodeInfo instance
--- a/src/libvirt.c
+++ b/src/libvirt.c
@@ -35,7 +35,6 @@
 #include "virterror_internal.h"
 #include "logging.h"
 #include "datatypes.h"
-#include "libvirt_internal.h"
 #include "driver.h"
 #include "uuid.h"
@@ -3044,36 +3043,75 @@ virDomainMigrateVersion2 (virDomainPtr domain,
     return ddomain;
 }

-/*
- * Tunnelled migration has the following flow:
- *
- * virDomainMigrate(src, uri)
- *  - virDomainMigratePerform(src, uri)
- *    - dst = virConnectOpen(uri)
- *    - virDomainMigratePrepareTunnel(dst)
- *    - while (1)
- *      - virStreamSend(dst, data)
- *    - virDomainMigrateFinish(dst)
- *    - virConnectClose(dst)
- */
-static virDomainPtr
-virDomainMigrateTunnelled(virDomainPtr domain,
-                          virConnectPtr dconn,
-                          unsigned long flags,
-                          const char *dname,
-                          const char *uri,
-                          unsigned long bandwidth)
-{
-    /* Perform the migration.  The driver isn't supposed to return
-     * until the migration is complete.
-     */
-    if (domain->conn->driver->domainMigratePerform
-        (domain, NULL, 0, uri, flags, dname, bandwidth) == -1)
-        return NULL;
-
-    return virDomainLookupByName(dconn, dname ? dname : domain->name);
+/*
+ * This is sort of a migration v3
+ *
+ * In this version, the client does not talk to the destination
+ * libvirtd. The source libvirtd will still try to talk to the
+ * destination libvirtd though, and will do the prepare/perform/finish
+ * steps.
+ */
+static int
+virDomainMigratePeer2Peer (virDomainPtr domain,
+                           unsigned long flags,
+                           const char *dname,
+                           const char *uri,
+                           unsigned long bandwidth)
+{
+    if (!domain->conn->driver->domainMigratePerform) {
+        virLibConnError (domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
+        return -1;
+    }
+
+    /* Perform the migration.  The driver isn't supposed to return
+     * until the migration is complete.
+     */
+    return domain->conn->driver->domainMigratePerform(domain,
+                                                      NULL, /* cookie */
+                                                      0,    /* cookielen */
+                                                      uri,
+                                                      flags,
+                                                      dname,
+                                                      bandwidth);
+}
+
+/*
+ * This is a variation on v1 & 2 migration
+ *
+ * This is for hypervisors which can directly handshake
+ * without any libvirtd involvement on destination either
+ * from client, or source libvirt.
+ *
+ * eg, XenD can talk direct to XenD, so libvirtd on dest
+ * does not need to be involved at all, or even running
+ */
+static int
+virDomainMigrateDirect (virDomainPtr domain,
+                        unsigned long flags,
+                        const char *dname,
+                        const char *uri,
+                        unsigned long bandwidth)
+{
+    if (!domain->conn->driver->domainMigratePerform) {
+        virLibConnError (domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
+        return -1;
+    }
+
+    /* Perform the migration.  The driver isn't supposed to return
+     * until the migration is complete.
+     */
+    return domain->conn->driver->domainMigratePerform(domain,
+                                                      NULL, /* cookie */
+                                                      0,    /* cookielen */
+                                                      uri,
+                                                      flags,
+                                                      dname,
+                                                      bandwidth);
 }

 /**
  * virDomainMigrate:
  * @domain: a domain object
@@ -3087,24 +3125,34 @@ virDomainMigrateTunnelled(virDomainPtr domain,
  * host given by dconn (a connection to the destination host).
  *
  * Flags may be one of more of the following:
- *   VIR_MIGRATE_LIVE      Attempt a live migration.
- *   VIR_MIGRATE_TUNNELLED Attempt to do a migration tunnelled through the
- *                         libvirt RPC mechanism
+ *   VIR_MIGRATE_LIVE      Do not pause the VM during migration
+ *   VIR_MIGRATE_PEER2PEER Direct connection between source & destination hosts
+ *   VIR_MIGRATE_TUNNELLED Tunnel migration data over the libvirt RPC channel
+ *
+ * VIR_MIGRATE_TUNNELLED requires that VIR_MIGRATE_PEER2PEER be set.
+ * Applications using the VIR_MIGRATE_PEER2PEER flag will probably
+ * prefer to invoke virDomainMigrateToURI, avoiding the need to
+ * open connection to the destination host themselves.
 *
 * If a hypervisor supports renaming domains during migration,
 * then you may set the dname parameter to the new name (otherwise
 * it keeps the same name). If this is not supported by the
 * hypervisor, dname must be NULL or else you will get an error.
 *
- * Since typically the two hypervisors connect directly to each
- * other in order to perform the migration, you may need to specify
- * a path from the source to the destination. This is the purpose
- * of the uri parameter. If uri is NULL, then libvirt will try to
- * find the best method. Uri may specify the hostname or IP address
- * of the destination host as seen from the source. Or uri may be
- * a URI giving transport, hostname, user, port, etc. in the usual
- * form. Refer to driver documentation for the particular URIs
- * supported.
+ * If the VIR_MIGRATE_PEER2PEER flag is set, the uri parameter
+ * must be a valid libvirt connection URI, by which the source
+ * libvirt driver can connect to the destination libvirt. If
+ * omitted, the dconn connection object will be queried for its
+ * current URI.
+ *
+ * If the VIR_MIGRATE_PEER2PEER flag is NOT set, the URI parameter
+ * takes a hypervisor specific format. The hypervisor capabilities
+ * XML includes details of the support URI schemes. If omitted
+ * the dconn will be asked for a default URI.
+ *
+ * In either case it is typically only neccessary to specify a
+ * URI if the destination host has multiple interfaces and a
+ * specific interface is required to transmit migration data.
 *
 * The maximum bandwidth (in Mbps) that will be used to do migration
 * can be specified with the bandwidth parameter. If set to 0,
@@ -3159,18 +3207,35 @@ virDomainMigrate (virDomainPtr domain,
         goto error;
     }

-    if (flags & VIR_MIGRATE_TUNNELLED) {
-        char *dstURI = NULL;
-        if (uri == NULL) {
-            dstURI = virConnectGetURI(dconn);
-            if (!uri)
-                return NULL;
-        }
-
-        ddomain = virDomainMigrateTunnelled(domain, dconn, flags, dname, uri ? uri : dstURI, bandwidth);
-
-        VIR_FREE(dstURI);
+    if (flags & VIR_MIGRATE_PEER2PEER) {
+        if (VIR_DRV_SUPPORTS_FEATURE (domain->conn->driver, domain->conn,
+                                      VIR_DRV_FEATURE_MIGRATION_P2P)) {
+            char *dstURI = NULL;
+            if (uri == NULL) {
+                dstURI = virConnectGetURI(dconn);
+                if (!uri)
+                    return NULL;
+            }
+
+            if (virDomainMigratePeer2Peer(domain, flags, dname, uri ? uri : dstURI, bandwidth) < 0) {
+                VIR_FREE(dstURI);
+                goto error;
+            }
+            VIR_FREE(dstURI);
+
+            ddomain = virDomainLookupByName (dconn, dname ? dname : domain->name);
+        } else {
+            /* This driver does not support peer to peer migration */
+            virLibConnError (domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
+            goto error;
+        }
     } else {
+        if (flags & VIR_MIGRATE_TUNNELLED) {
+            virLibConnError(domain->conn, VIR_ERR_OPERATION_INVALID,
+                            _("cannot perform tunnelled migration without using peer2peer flag"));
+            goto error;
+        }
+
         /* Check that migration is supported by both drivers. */
         if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
                                      VIR_DRV_FEATURE_MIGRATION_V1) &&
@@ -3183,13 +3248,14 @@ virDomainMigrate (virDomainPtr domain,
                                      VIR_DRV_FEATURE_MIGRATION_V2))
             ddomain = virDomainMigrateVersion2(domain, dconn, flags, dname, uri, bandwidth);
         else {
+            /* This driver does not support any migration method */
             virLibConnError(domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
             goto error;
         }
     }

     if (ddomain == NULL)
         goto error;

     return ddomain;
@@ -3199,6 +3265,122 @@ error:
     return NULL;
 }

+/**
+ * virDomainMigrateToURI:
+ * @domain: a domain object
+ * @duri: mandatory URI for the destination host
+ * @flags: flags
+ * @dname: (optional) rename domain to this at destination
+ * @bandwidth: (optional) specify migration bandwidth limit in Mbps
+ *
+ * Migrate the domain object from its current host to the destination
+ * host given by dconn (a connection to the destination host).
+ *
+ * Flags may be one of more of the following:
+ *   VIR_MIGRATE_LIVE      Do not pause the VM during migration
+ *   VIR_MIGRATE_PEER2PEER Direct connection between source & destination hosts
+ *   VIR_MIGRATE_TUNNELLED Tunnel migration data over the libvirt RPC channel
+ *
+ * VIR_MIGRATE_TUNNELLED requires that VIR_MIGRATE_PEER2PEER be set.
+ * Applications using the VIR_MIGRATE_PEER2PEER flag will probably
+ * prefer to invoke virDomainMigrateToURI, avoiding the need to
+ * open connection to the destination host themselves.
+ *
+ * If a hypervisor supports renaming domains during migration,
+ * then you may set the dname parameter to the new name (otherwise
+ * it keeps the same name). If this is not supported by the
+ * hypervisor, dname must be NULL or else you will get an error.
+ *
+ * If the VIR_MIGRATE_PEER2PEER flag is set, the duri parameter
+ * must be a valid libvirt connection URI, by which the source
+ * libvirt driver can connect to the destination libvirt. If
+ * omitted, the dconn connection object will be queried for its
+ * current URI.
+ *
+ * If the VIR_MIGRATE_PEER2PEER flag is NOT set, the duri parameter
+ * takes a hypervisor specific format. The hypervisor capabilities
+ * XML includes details of the support URI schemes. If omitted
+ * the dconn will be asked for a default URI. Not all hypervisors
+ * will support this mode of migration, so if the VIR_MIGRATE_PEER2PEER
+ * flag is not set, then it may be neccessary to use the alternative
+ * virDomainMigrate API providing an explicit virConnectPtr for the
+ * destination host
+ *
+ * The maximum bandwidth (in Mbps) that will be used to do migration
+ * can be specified with the bandwidth parameter. If set to 0,
+ * libvirt will choose a suitable default. Some hypervisors do
+ * not support this feature and will return an error if bandwidth
+ * is not 0.
+ *
+ * To see which features are supported by the current hypervisor,
+ * see virConnectGetCapabilities, /capabilities/host/migration_features.
+ *
+ * There are many limitations on migration imposed by the underlying
+ * technology - for example it may not be possible to migrate between
+ * different processors even with the same architecture, or between
+ * different types of hypervisor.
+ *
+ * Returns 0 if the migration succeeded, -1 upon error.
+ */
+int
+virDomainMigrateToURI (virDomainPtr domain,
+                       const char *duri,
+                       unsigned long flags,
+                       const char *dname,
+                       unsigned long bandwidth)
+{
+    DEBUG("domain=%p, duri=%p, flags=%lu, dname=%s, bandwidth=%lu",
+          domain, NULLSTR(duri), flags, NULLSTR(dname), bandwidth);
+
+    virResetLastError();
+
+    /* First checkout the source */
+    if (!VIR_IS_CONNECTED_DOMAIN (domain)) {
+        virLibDomainError(NULL, VIR_ERR_INVALID_DOMAIN, __FUNCTION__);
+        return -1;
+    }
+    if (domain->conn->flags & VIR_CONNECT_RO) {
+        virLibDomainError(domain, VIR_ERR_OPERATION_DENIED, __FUNCTION__);
+        goto error;
+    }
+
+    if (duri == NULL) {
+        virLibConnError (domain->conn, VIR_ERR_INVALID_ARG, __FUNCTION__);
+        goto error;
+    }
+
+    if (flags & VIR_MIGRATE_PEER2PEER) {
+        if (VIR_DRV_SUPPORTS_FEATURE (domain->conn->driver, domain->conn,
+                                      VIR_DRV_FEATURE_MIGRATION_P2P)) {
+            if (virDomainMigratePeer2Peer (domain, flags, dname, duri, bandwidth) < 0)
+                goto error;
+        } else {
+            /* No peer to peer migration supported */
+            virLibConnError (domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
+            goto error;
+        }
+    } else {
+        if (VIR_DRV_SUPPORTS_FEATURE (domain->conn->driver, domain->conn,
+                                      VIR_DRV_FEATURE_MIGRATION_DIRECT)) {
+            if (virDomainMigrateDirect (domain, flags, dname, duri, bandwidth) < 0)
+                goto error;
+        } else {
+            /* Cannot do a migration with only the perform step */
+            virLibConnError (domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
+            goto error;
+        }
+    }
+
+    return 0;
+
+error:
+    /* Copy to connection error object for back compatability */
+    virSetConnError(domain->conn);
+    return -1;
+}
+
 /*
  * Not for public use. This function is part of the internal
  * implementation of migration in the remote case.
--- a/src/libvirt_internal.h
+++ b/src/libvirt_internal.h
@@ -55,6 +55,17 @@ enum {
      * domainMigratePerform/domainMigrateFinish2.
      */
     VIR_DRV_FEATURE_MIGRATION_V2 = 3,
+
+    /* Driver supports peer-2-peer virDomainMigrate ie source host
+     * does all the prepare/perform/finish steps directly
+     */
+    VIR_DRV_FEATURE_MIGRATION_P2P = 4,
+
+    /* Driver supports migration with only the source host involved,
+     * no libvirtd connetions on the destination at all, only the
+     * perform step is used.
+     */
+    VIR_DRV_FEATURE_MIGRATION_DIRECT = 5,
 };
--- a/src/libvirt_public.syms
+++ b/src/libvirt_public.syms
@@ -326,6 +326,7 @@ LIBVIRT_0.7.2 {
         virStreamFinish;
         virStreamAbort;
         virStreamFree;
+        virDomainMigrateToURI;
 } LIBVIRT_0.7.1;

 # .... define new API here using predicted next version number ....
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -2382,8 +2382,11 @@ static int
 qemudSupportsFeature (virConnectPtr conn ATTRIBUTE_UNUSED, int feature)
 {
     switch (feature) {
-    case VIR_DRV_FEATURE_MIGRATION_V2: return 1;
-    default: return 0;
+    case VIR_DRV_FEATURE_MIGRATION_V2:
+    case VIR_DRV_FEATURE_MIGRATION_P2P:
+        return 1;
+    default:
+        return 0;
     }
 }
@@ -6415,7 +6418,7 @@ static int doNativeMigrate(virDomainPtr dom,
                            unsigned long resource)
 {
     int ret = -1;
-    xmlURIPtr uribits;
+    xmlURIPtr uribits = NULL;
     int status;
     unsigned long long transferred, remaining, total;
@@ -6683,6 +6686,57 @@ cleanup:
 }

+/* This is essentially a simplified re-impl of
+ * virDomainMigrateVersion2 from libvirt.c, but running in source
+ * libvirtd context, instead of client app context */
+static int doNonTunnelMigrate(virDomainPtr dom,
+                              virConnectPtr dconn,
+                              virDomainObjPtr vm,
+                              const char *dom_xml,
+                              const char *uri ATTRIBUTE_UNUSED,
+                              unsigned long flags,
+                              const char *dname,
+                              unsigned long resource)
+{
+    virDomainPtr ddomain = NULL;
+    int retval = -1;
+    char *uri_out = NULL;
+
+    /* NB we don't pass 'uri' into this, since that's the libvirtd
+     * URI in this context - so we let dest pick it */
+    if (dconn->driver->domainMigratePrepare2(dconn,
+                                             NULL, /* cookie */
+                                             0, /* cookielen */
+                                             NULL, /* uri */
+                                             &uri_out,
+                                             flags, dname,
+                                             resource, dom_xml) < 0)
+        /* domainMigratePrepare2 sets the error for us */
+        goto cleanup;
+
+    if (uri_out == NULL) {
+        qemudReportError(NULL, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
+                         _("domainMigratePrepare2 did not set uri"));
+    }
+
+    if (doNativeMigrate(dom, vm, uri_out, flags, dname, resource) < 0)
+        goto finish;
+
+    retval = 0;
+
+finish:
+    dname = dname ? dname : dom->name;
+    ddomain = dconn->driver->domainMigrateFinish2
+        (dconn, dname, NULL, 0, uri_out, flags, retval);
+
+    if (ddomain)
+        virUnrefDomain(ddomain);
+
+cleanup:
+    return retval;
+}
+
 static int doPeer2PeerMigrate(virDomainPtr dom,
                               virDomainObjPtr vm,
                               const char *uri,
@@ -6705,9 +6759,9 @@ static int doPeer2PeerMigrate(virDomainPtr dom,
         return -1;
     }
     if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
-                                  VIR_DRV_FEATURE_MIGRATION_V2)) {
+                                  VIR_DRV_FEATURE_MIGRATION_P2P)) {
         qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED, "%s",
-                         _("Destination libvirt does not support required migration protocol 2"));
+                         _("Destination libvirt does not support peer-to-peer migration protocol"));
         goto cleanup;
     }
@@ -6718,7 +6772,10 @@ static int doPeer2PeerMigrate(virDomainPtr dom,
         goto cleanup;
     }

-    ret = doTunnelMigrate(dom, dconn, vm, dom_xml, uri, flags, dname, resource);
+    if (flags & VIR_MIGRATE_TUNNELLED)
+        ret = doTunnelMigrate(dom, dconn, vm, dom_xml, uri, flags, dname, resource);
+    else
+        ret = doNonTunnelMigrate(dom, dconn, vm, dom_xml, uri, flags, dname, resource);

 cleanup:
     VIR_FREE(dom_xml);
--- a/src/xen/xen_driver.c
+++ b/src/xen/xen_driver.c
@@ -455,8 +455,11 @@ static int
 xenUnifiedSupportsFeature (virConnectPtr conn ATTRIBUTE_UNUSED, int feature)
 {
     switch (feature) {
-    case VIR_DRV_FEATURE_MIGRATION_V1: return 1;
-    default: return 0;
+    case VIR_DRV_FEATURE_MIGRATION_V1:
+    case VIR_DRV_FEATURE_MIGRATION_DIRECT:
+        return 1;
+    default:
+        return 0;
     }
 }
--- a/src/xen/xend_internal.c
+++ b/src/xen/xend_internal.c
@@ -4409,6 +4409,8 @@ xenDaemonDomainMigratePerform (virDomainPtr domain,
         strcpy (live, "1");
         flags &= ~VIR_MIGRATE_LIVE;
     }
+    /* XXX we could easily do tunnelled & peer2peer migration too
+       if we want to. support these... */
     if (flags != 0) {
         virXendError (conn, VIR_ERR_NO_SUPPORT,
                       "%s", _("xenDaemonDomainMigrate: unsupported flag"));
--- a/tools/virsh.c
+++ b/tools/virsh.c
@@ -2462,6 +2462,8 @@ static const vshCmdInfo info_migrate[] = {

 static const vshCmdOptDef opts_migrate[] = {
     {"live", VSH_OT_BOOL, 0, gettext_noop("live migration")},
+    {"p2p", VSH_OT_BOOL, 0, gettext_noop("peer-2-peer migration")},
+    {"direct", VSH_OT_BOOL, 0, gettext_noop("direct migration")},
     {"tunnelled", VSH_OT_BOOL, 0, gettext_noop("tunnelled migration")},
     {"domain", VSH_OT_DATA, VSH_OFLAG_REQ, gettext_noop("domain name, id or uuid")},
     {"desturi", VSH_OT_DATA, VSH_OFLAG_REQ, gettext_noop("connection URI of the destination host")},
@@ -2478,8 +2480,6 @@ cmdMigrate (vshControl *ctl, const vshCmd *cmd)
     const char *migrateuri;
     const char *dname;
     int flags = 0, found, ret = FALSE;
-    virConnectPtr dconn = NULL;
-    virDomainPtr ddom = NULL;

     if (!vshConnectionUsability (ctl, ctl->conn, TRUE))
         return FALSE;
@@ -2499,40 +2499,41 @@ cmdMigrate (vshControl *ctl, const vshCmd *cmd)
     if (vshCommandOptBool (cmd, "live"))
         flags |= VIR_MIGRATE_LIVE;
+    if (vshCommandOptBool (cmd, "p2p"))
+        flags |= VIR_MIGRATE_PEER2PEER;
     if (vshCommandOptBool (cmd, "tunnelled"))
         flags |= VIR_MIGRATE_TUNNELLED;

-    if (!(flags & VIR_MIGRATE_TUNNELLED)) {
-        /* For regular live migration, temporarily connect to the destination
-         * host. For tunnelled migration, that will be done by the remote
-         * libvirtd.
-         */
-        dconn = virConnectOpenAuth(desturi, virConnectAuthPtrDefault, 0);
-        if (!dconn) goto done;
-    }
-    else {
-        /* when doing tunnelled migration, use migrateuri if it's available,
-         * but if not, fall back to desturi. This allows both of these
-         * to work:
-         *
-         * virsh migrate guest qemu+tls://dest/system
-         * virsh migrate guest qemu+tls://dest/system qemu+tls://dest-alt/system
-         */
-        if (migrateuri == NULL)
-            migrateuri = desturi;
-    }
-
-    /* Migrate. */
-    ddom = virDomainMigrate(dom, dconn, flags, dname, migrateuri, 0);
-    if (!ddom) goto done;
-
-    ret = TRUE;
+    if ((flags & VIR_MIGRATE_PEER2PEER) ||
+        vshCommandOptBool (cmd, "direct")) {
+        /* For peer2peer migration or direct migration we only expect one URI
+         * a libvirt URI, or a hypervisor specific URI. */
+
+        if (migrateuri != NULL) {
+            vshError(ctl, FALSE, "%s", _("migrate: Unexpected migrateuri for peer2peer/direct migration"));
+            goto done;
+        }
+
+        if (virDomainMigrateToURI (dom, desturi, flags, dname, 0) == 0)
+            ret = TRUE;
+    } else {
+        /* For traditional live migration, connect to the destination host directly. */
+        virConnectPtr dconn = NULL;
+        virDomainPtr ddom = NULL;
+
+        dconn = virConnectOpenAuth (desturi, virConnectAuthPtrDefault, 0);
+        if (!dconn) goto done;
+
+        ddom = virDomainMigrate (dom, dconn, flags, dname, migrateuri, 0);
+        if (ddom) {
+            virDomainFree(ddom);
+            ret = TRUE;
+        }
+        virConnectClose (dconn);
+    }

 done:
     if (dom) virDomainFree (dom);
-    if (ddom) virDomainFree (ddom);
-    if (dconn) virConnectClose (dconn);
     return ret;
 }