/*
 * qemu_migration.c: QEMU migration handling
 *
 * Copyright (C) 2006-2011 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <config.h>

#include <sys/time.h>
#include <gnutls/gnutls.h>
#include <gnutls/x509.h>
#include <fcntl.h>

#include "qemu_migration.h"
#include "qemu_monitor.h"
#include "qemu_domain.h"
#include "qemu_process.h"
#include "qemu_capabilities.h"
#include "qemu_audit.h"
#include "qemu_cgroup.h"

#include "logging.h"
#include "virterror_internal.h"
#include "memory.h"
#include "util.h"
#include "files.h"
#include "datatypes.h"
#include "fdstream.h"
#include "uuid.h"

#define VIR_FROM_THIS VIR_FROM_QEMU

#define timeval_to_ms(tv) (((tv).tv_sec * 1000ull) + ((tv).tv_usec / 1000))

enum qemuMigrationCookieFlags {
    QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS,

    QEMU_MIGRATION_COOKIE_FLAG_LAST
};

VIR_ENUM_DECL(qemuMigrationCookieFlag);
VIR_ENUM_IMPL(qemuMigrationCookieFlag,
              QEMU_MIGRATION_COOKIE_FLAG_LAST,
              "graphics");

enum qemuMigrationCookieFeatures {
    QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
};

typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
typedef qemuMigrationCookieGraphics *qemuMigrationCookieGraphicsPtr;
struct _qemuMigrationCookieGraphics {
    int type;
    int port;
    int tlsPort;
    char *listen;
    char *tlsSubject;
};

typedef struct _qemuMigrationCookie qemuMigrationCookie;
typedef qemuMigrationCookie *qemuMigrationCookiePtr;
struct _qemuMigrationCookie {
    int flags;
    int flagsMandatory;

    /* Host properties */
    unsigned char hostuuid[VIR_UUID_BUFLEN];
    char *hostname;

    /* Guest properties */
    unsigned char uuid[VIR_UUID_BUFLEN];
    char *name;

    /* If (flags & QEMU_MIGRATION_COOKIE_GRAPHICS) */
    qemuMigrationCookieGraphicsPtr graphics;
};

static void qemuMigrationCookieGraphicsFree(qemuMigrationCookieGraphicsPtr grap)
{
    if (!grap)
        return;
    VIR_FREE(grap->listen);
    VIR_FREE(grap->tlsSubject);
    VIR_FREE(grap);
}


static void qemuMigrationCookieFree(qemuMigrationCookiePtr mig)
{
    if (!mig)
        return;

    if (mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS)
        qemuMigrationCookieGraphicsFree(mig->graphics);

    VIR_FREE(mig->hostname);
    VIR_FREE(mig->name);
    VIR_FREE(mig);
}
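
/* Extract the distinguished name from the server certificate in @certdir,
 * so the destination can advertise it to graphics clients for TLS
 * verification after relocation. Returns an allocated DN string, or
 * NULL on error. */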
static char *
qemuDomainExtractTLSSubject(const char *certdir)
{
    char *certfile = NULL;
    char *subject = NULL;
    char *pemdata = NULL;
    gnutls_datum_t pemdatum;
    gnutls_x509_crt_t cert;
    int ret;
    size_t subjectlen;

    if (virAsprintf(&certfile, "%s/server-cert.pem", certdir) < 0)
        goto no_memory;

    if (virFileReadAll(certfile, 8192, &pemdata) < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("unable to read server cert %s"), certfile);
        goto error;
    }

    ret = gnutls_x509_crt_init(&cert);
    if (ret < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("cannot initialize cert object: %s"),
                        gnutls_strerror(ret));
        goto error;
    }

    pemdatum.data = (unsigned char *)pemdata;
    pemdatum.size = strlen(pemdata);

    ret = gnutls_x509_crt_import(cert, &pemdatum, GNUTLS_X509_FMT_PEM);
    if (ret < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("cannot load cert data from %s: %s"),
                        certfile, gnutls_strerror(ret));
        goto error;
    }

    subjectlen = 1024;
    if (VIR_ALLOC_N(subject, subjectlen+1) < 0)
        goto no_memory;

    gnutls_x509_crt_get_dn(cert, subject, &subjectlen);
    subject[subjectlen] = '\0';

    VIR_FREE(certfile);
    VIR_FREE(pemdata);

    return subject;

no_memory:
    virReportOOMError();
error:
    VIR_FREE(certfile);
    VIR_FREE(pemdata);
    return NULL;
}


static qemuMigrationCookieGraphicsPtr
qemuMigrationCookieGraphicsAlloc(struct qemud_driver *driver,
                                 virDomainGraphicsDefPtr def)
{
    qemuMigrationCookieGraphicsPtr mig = NULL;
    const char *listenAddr;

    if (VIR_ALLOC(mig) < 0)
        goto no_memory;

    mig->type = def->type;
    if (mig->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC) {
        mig->port = def->data.vnc.port;
        listenAddr = def->data.vnc.listenAddr;
        if (!listenAddr)
            listenAddr = driver->vncListen;

        if (driver->vncTLS &&
            !(mig->tlsSubject = qemuDomainExtractTLSSubject(driver->vncTLSx509certdir)))
            goto error;
    } else {
        mig->port = def->data.spice.port;
        if (driver->spiceTLS)
            mig->tlsPort = def->data.spice.tlsPort;
        else
            mig->tlsPort = -1;
        listenAddr = def->data.spice.listenAddr;
        if (!listenAddr)
            listenAddr = driver->spiceListen;

        if (driver->spiceTLS &&
            !(mig->tlsSubject = qemuDomainExtractTLSSubject(driver->spiceTLSx509certdir)))
            goto error;
    }
    if (!(mig->listen = strdup(listenAddr)))
        goto no_memory;

    return mig;

no_memory:
    virReportOOMError();
error:
    qemuMigrationCookieGraphicsFree(mig);
    return NULL;
}


static qemuMigrationCookiePtr
qemuMigrationCookieNew(virDomainObjPtr dom)
{
    qemuMigrationCookiePtr mig = NULL;

    if (VIR_ALLOC(mig) < 0)
        goto no_memory;

    if (!(mig->name = strdup(dom->def->name)))
        goto no_memory;
    memcpy(mig->uuid, dom->def->uuid, VIR_UUID_BUFLEN);

    if (!(mig->hostname = virGetHostname(NULL)))
        goto no_memory;
    if (virGetHostUUID(mig->hostuuid) < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Unable to obtain host UUID"));
        goto error;
    }

    return mig;

no_memory:
    virReportOOMError();
error:
    qemuMigrationCookieFree(mig);
    return NULL;
}


static int
qemuMigrationCookieAddGraphics(qemuMigrationCookiePtr mig,
                               struct qemud_driver *driver,
                               virDomainObjPtr dom)
{
    if (mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Migration graphics data already present"));
        return -1;
    }

    if (dom->def->ngraphics == 1 &&
        (dom->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC ||
         dom->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE)) {
        if (!(mig->graphics =
              qemuMigrationCookieGraphicsAlloc(driver, dom->def->graphics[0])))
            return -1;
        mig->flags |= QEMU_MIGRATION_COOKIE_GRAPHICS;
    }

    return 0;
}


static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf,
                                                 qemuMigrationCookieGraphicsPtr grap)
{
    virBufferAsprintf(buf, "  <graphics type='%s' port='%d' listen='%s'",
                      virDomainGraphicsTypeToString(grap->type),
                      grap->port, grap->listen);
    if (grap->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE)
        virBufferAsprintf(buf, " tlsPort='%d'", grap->tlsPort);
    if (grap->tlsSubject) {
        virBufferAddLit(buf, ">\n");
        virBufferEscapeString(buf, "    <cert info='subject' value='%s'/>\n",
                              grap->tlsSubject);
        virBufferAddLit(buf, "  </graphics>\n");
    } else {
        virBufferAddLit(buf, "/>\n");
    }
}
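
/* A cookie produced by the formatter below looks roughly like this
 * (illustrative values):
 *
 *   <qemu-migration>
 *     <name>demo-guest</name>
 *     <uuid>00010203-0405-0607-0809-0a0b0c0d0e0f</uuid>
 *     <hostname>src.example.com</hostname>
 *     <hostuuid>10111213-1415-1617-1819-1a1b1c1d1e1f</hostuuid>
 *     <feature name='graphics'/>
 *     <graphics type='spice' port='5901' listen='0.0.0.0' tlsPort='5902'>
 *       <cert info='subject' value='...'/>
 *     </graphics>
 *   </qemu-migration>
 *
 * <feature> elements are only emitted for flags the receiver must
 * understand; <graphics> only when graphics relocation data was added. */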
static void qemuMigrationCookieXMLFormat(virBufferPtr buf,
                                         qemuMigrationCookiePtr mig)
{
    char uuidstr[VIR_UUID_STRING_BUFLEN];
    char hostuuidstr[VIR_UUID_STRING_BUFLEN];
    int i;

    virUUIDFormat(mig->uuid, uuidstr);
    virUUIDFormat(mig->hostuuid, hostuuidstr);

    virBufferAsprintf(buf, "<qemu-migration>\n");
    virBufferEscapeString(buf, "  <name>%s</name>\n", mig->name);
    virBufferAsprintf(buf, "  <uuid>%s</uuid>\n", uuidstr);
    virBufferEscapeString(buf, "  <hostname>%s</hostname>\n", mig->hostname);
    virBufferAsprintf(buf, "  <hostuuid>%s</hostuuid>\n", hostuuidstr);

    for (i = 0 ; i < QEMU_MIGRATION_COOKIE_FLAG_LAST ; i++) {
        if (mig->flagsMandatory & (1 << i))
            virBufferAsprintf(buf, "  <feature name='%s'/>\n",
                              qemuMigrationCookieFlagTypeToString(i));
    }

    if ((mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS) &&
        mig->graphics)
        qemuMigrationCookieGraphicsXMLFormat(buf, mig->graphics);

    virBufferAddLit(buf, "</qemu-migration>\n");
}


static char *qemuMigrationCookieXMLFormatStr(qemuMigrationCookiePtr mig)
{
    virBuffer buf = VIR_BUFFER_INITIALIZER;

    qemuMigrationCookieXMLFormat(&buf, mig);

    if (virBufferError(&buf)) {
        virReportOOMError();
        return NULL;
    }

    return virBufferContentAndReset(&buf);
}


static qemuMigrationCookieGraphicsPtr
qemuMigrationCookieGraphicsXMLParse(xmlXPathContextPtr ctxt)
{
    qemuMigrationCookieGraphicsPtr grap;
    char *tmp;

    if (VIR_ALLOC(grap) < 0)
        goto no_memory;

    if (!(tmp = virXPathString("string(./graphics/@type)", ctxt))) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("missing type attribute in migration data"));
        goto error;
    }
    if ((grap->type = virDomainGraphicsTypeFromString(tmp)) < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("unknown graphics type %s"), tmp);
        VIR_FREE(tmp);
        goto error;
    }
    VIR_FREE(tmp);
    if (virXPathInt("string(./graphics/@port)", ctxt, &grap->port) < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("missing port attribute in migration data"));
        goto error;
    }
    if (grap->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
        if (virXPathInt("string(./graphics/@tlsPort)", ctxt, &grap->tlsPort) < 0) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("missing tlsPort attribute in migration data"));
            goto error;
        }
    }
    if (!(grap->listen = virXPathString("string(./graphics/@listen)", ctxt))) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("missing listen attribute in migration data"));
        goto error;
    }
    /* Optional */
    grap->tlsSubject = virXPathString("string(./graphics/cert[@info='subject']/@value)", ctxt);

    return grap;

no_memory:
    virReportOOMError();
error:
    qemuMigrationCookieGraphicsFree(grap);
    return NULL;
}
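
/* Parse a cookie received from the peer into @mig, cross-checking the
 * domain name/uuid against local data and rejecting migration back to
 * the same host. @flags names the cookie features this side supports;
 * any <feature> entry outside that set is an error. */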
static int
qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
                            xmlXPathContextPtr ctxt,
                            int flags)
{
    char uuidstr[VIR_UUID_STRING_BUFLEN];
    char *tmp;
    xmlNodePtr *nodes = NULL;
    int i, n;

    /* We don't store the uuid, name, hostname, or hostuuid
     * values. We just compare them to local data to do some
     * sanity checking on migration operation
     */

    /* Extract domain name */
    if (!(tmp = virXPathString("string(./name[1])", ctxt))) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("missing name element in migration data"));
        goto error;
    }
    if (STRNEQ(tmp, mig->name)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("Incoming cookie data had unexpected name %s vs %s"),
                        tmp, mig->name);
        goto error;
    }
    VIR_FREE(tmp);

    /* Extract domain uuid */
    tmp = virXPathString("string(./uuid[1])", ctxt);
    if (!tmp) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("missing uuid element in migration data"));
        goto error;
    }
    virUUIDFormat(mig->uuid, uuidstr);
    if (STRNEQ(tmp, uuidstr)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("Incoming cookie data had unexpected UUID %s vs %s"),
                        tmp, uuidstr);
        goto error;
    }
    VIR_FREE(tmp);

    /* Check & forbid "localhost" migration */
    if (!(tmp = virXPathString("string(./hostname[1])", ctxt))) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("missing hostname element in migration data"));
        goto error;
    }
    if (STREQ(tmp, mig->hostname)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("Attempt to migrate guest to the same host %s"),
                        tmp);
        goto error;
    }
    VIR_FREE(tmp);

    if (!(tmp = virXPathString("string(./hostuuid[1])", ctxt))) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("missing hostuuid element in migration data"));
        goto error;
    }
    virUUIDFormat(mig->hostuuid, uuidstr);
    if (STREQ(tmp, uuidstr)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("Attempt to migrate guest to the same host %s"),
                        tmp);
        goto error;
    }
    VIR_FREE(tmp);

    /* Check to ensure all mandatory features from XML are also
     * present in 'flags' */
    if ((n = virXPathNodeSet("./feature", ctxt, &nodes)) < 0)
        goto error;

    for (i = 0 ; i < n ; i++) {
        int val;
        char *str = virXMLPropString(nodes[i], "name");
        if (!str) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("missing feature name"));
            goto error;
        }

        if ((val = qemuMigrationCookieFlagTypeFromString(str)) < 0) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Unknown migration cookie feature %s"),
                            str);
            VIR_FREE(str);
            goto error;
        }

        if ((flags & (1 << val)) == 0) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Unsupported migration cookie feature %s"),
                            str);
            VIR_FREE(str);
            goto error;
        }
        VIR_FREE(str);
    }
    VIR_FREE(nodes);

    if ((flags & QEMU_MIGRATION_COOKIE_GRAPHICS) &&
        virXPathBoolean("count(./graphics) > 0", ctxt) &&
        (!(mig->graphics = qemuMigrationCookieGraphicsXMLParse(ctxt))))
        goto error;

    return 0;

error:
    VIR_FREE(tmp);
    VIR_FREE(nodes);
    return -1;
}


static int
qemuMigrationCookieXMLParseStr(qemuMigrationCookiePtr mig,
                               const char *xml,
                               int flags)
{
    xmlDocPtr doc = NULL;
    xmlXPathContextPtr ctxt = NULL;
    int ret = -1;

    VIR_DEBUG("xml=%s", NULLSTR(xml));

    if (!(doc = virXMLParseString(xml, "qemumigration.xml")))
        goto cleanup;

    if ((ctxt = xmlXPathNewContext(doc)) == NULL) {
        virReportOOMError();
        goto cleanup;
    }

    ctxt->node = xmlDocGetRootElement(doc);

    ret = qemuMigrationCookieXMLParse(mig, ctxt, flags);

cleanup:
    xmlXPathFreeContext(ctxt);
    xmlFreeDoc(doc);

    return ret;
}
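
/* "Baking" encodes the local cookie as XML for transmission to the peer;
 * "eating" (below) allocates a cookie seeded with local identity data and
 * then parses the peer's XML into it. The cookie is an opaque blob as far
 * as the migration protocol is concerned; the two sides only need to
 * agree on the features named in <feature> elements. */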
static int
qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
                        struct qemud_driver *driver,
                        virDomainObjPtr dom,
                        char **cookieout,
                        int *cookieoutlen,
                        int flags)
{
    if (!cookieout || !cookieoutlen) {
        qemuReportError(VIR_ERR_INVALID_ARG, "%s",
                        _("missing migration cookie data"));
        return -1;
    }

    *cookieoutlen = 0;

    if (flags & QEMU_MIGRATION_COOKIE_GRAPHICS &&
        qemuMigrationCookieAddGraphics(mig, driver, dom) < 0)
        return -1;

    if (!(*cookieout = qemuMigrationCookieXMLFormatStr(mig)))
        return -1;

    *cookieoutlen = strlen(*cookieout) + 1;

    VIR_DEBUG("cookielen=%d cookie=%s", *cookieoutlen, *cookieout);

    return 0;
}


static qemuMigrationCookiePtr
qemuMigrationEatCookie(virDomainObjPtr dom,
                       const char *cookiein,
                       int cookieinlen,
                       int flags)
{
    qemuMigrationCookiePtr mig = NULL;

    /* Parse & validate incoming cookie (if any) */
    if (cookiein && cookieinlen &&
        cookiein[cookieinlen-1] != '\0') {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Migration cookie was not NULL terminated"));
        goto error;
    }

    VIR_DEBUG("cookielen=%d cookie='%s'", cookieinlen, NULLSTR(cookiein));

    if (!(mig = qemuMigrationCookieNew(dom)))
        return NULL;

    if (cookiein && cookieinlen &&
        qemuMigrationCookieXMLParseStr(mig, cookiein, flags) < 0)
        goto error;

    return mig;

error:
    qemuMigrationCookieFree(mig);
    return NULL;
}


bool
qemuMigrationIsAllowed(virDomainDefPtr def)
{
    if (def->nhostdevs > 0) {
        qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                        _("Domain with assigned host devices cannot be migrated"));
        return false;
    }

    return true;
}

/** qemuMigrationSetOffline
 * Pause domain for non-live migration.
 */
int
qemuMigrationSetOffline(struct qemud_driver *driver,
                        virDomainObjPtr vm)
{
    int ret;

    ret = qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION);
    if (ret == 0) {
        virDomainEventPtr event;

        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_SUSPENDED,
                                         VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED);
        if (event)
            qemuDomainEventQueue(driver, event);
    }

    return ret;
}


static int
qemuMigrationProcessJobSignals(struct qemud_driver *driver,
                               virDomainObjPtr vm,
                               const char *job)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
                        job, _("guest unexpectedly quit"));
        return -1;
    }

    if (priv->jobSignals & QEMU_JOB_SIGNAL_CANCEL) {
        priv->jobSignals ^= QEMU_JOB_SIGNAL_CANCEL;
        VIR_DEBUG("Cancelling job at client request");
        qemuDomainObjEnterMonitorWithDriver(driver, vm);
        ret = qemuMonitorMigrateCancel(priv->mon);
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        if (ret < 0) {
            VIR_WARN("Unable to cancel job");
        }
    } else if (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) {
        priv->jobSignals ^= QEMU_JOB_SIGNAL_SUSPEND;
        VIR_DEBUG("Pausing domain for non-live migration");
        if (qemuMigrationSetOffline(driver, vm) < 0)
            VIR_WARN("Unable to pause domain");
    } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME) {
        unsigned long long ms = priv->jobSignalsData.migrateDowntime;

        priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME;
        priv->jobSignalsData.migrateDowntime = 0;
        VIR_DEBUG("Setting migration downtime to %llums", ms);
        qemuDomainObjEnterMonitorWithDriver(driver, vm);
        ret = qemuMonitorSetMigrationDowntime(priv->mon, ms);
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        if (ret < 0)
            VIR_WARN("Unable to set migration downtime");
    } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_SPEED) {
        unsigned long bandwidth = priv->jobSignalsData.migrateBandwidth;

        priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
        priv->jobSignalsData.migrateBandwidth = 0;
        VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
        qemuDomainObjEnterMonitorWithDriver(driver, vm);
        ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        if (ret < 0)
            VIR_WARN("Unable to set migration speed");
    } else {
        ret = 0;
    }

    return ret;
}
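
/* Query migration progress via the monitor and fold the result into
 * priv->jobInfo; a COMPLETED, FAILED or CANCELLED status ends the
 * polling loop in qemuMigrationWaitForCompletion below. */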
static int
qemuMigrationUpdateJobStatus(struct qemud_driver *driver,
                             virDomainObjPtr vm,
                             const char *job)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    int status;
    unsigned long long memProcessed;
    unsigned long long memRemaining;
    unsigned long long memTotal;
    struct timeval now;

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
                        job, _("guest unexpectedly quit"));
        return -1;
    }

    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    ret = qemuMonitorGetMigrationStatus(priv->mon,
                                        &status,
                                        &memProcessed,
                                        &memRemaining,
                                        &memTotal);
    qemuDomainObjExitMonitorWithDriver(driver, vm);

    if (ret < 0) {
        priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
        return -1;
    }

    if (gettimeofday(&now, NULL) < 0) {
        priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
        virReportSystemError(errno, "%s",
                             _("cannot get time of day"));
        return -1;
    }
    priv->jobInfo.timeElapsed = timeval_to_ms(now) - priv->jobStart;

    switch (status) {
    case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
        priv->jobInfo.type = VIR_DOMAIN_JOB_NONE;
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        _("%s: %s"), job, _("is not active"));
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
        priv->jobInfo.dataTotal = memTotal;
        priv->jobInfo.dataRemaining = memRemaining;
        priv->jobInfo.dataProcessed = memProcessed;

        priv->jobInfo.memTotal = memTotal;
        priv->jobInfo.memRemaining = memRemaining;
        priv->jobInfo.memProcessed = memProcessed;
        ret = 0;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
        priv->jobInfo.type = VIR_DOMAIN_JOB_COMPLETED;
        ret = 0;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
        priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        _("%s: %s"), job, _("unexpectedly failed"));
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
        priv->jobInfo.type = VIR_DOMAIN_JOB_CANCELLED;
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        _("%s: %s"), job, _("canceled by client"));
        break;
    }

    return ret;
}


int
qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;

    while (priv->jobInfo.type == VIR_DOMAIN_JOB_UNBOUNDED) {
        /* Poll every 50ms for progress & to allow cancellation */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
        const char *job;

        switch (priv->jobActive) {
        case QEMU_JOB_MIGRATION_OUT:
            job = _("migration job");
            break;
        case QEMU_JOB_SAVE:
            job = _("domain save job");
            break;
        case QEMU_JOB_DUMP:
            job = _("domain core dump job");
            break;
        default:
            job = _("job");
        }

        if (qemuMigrationProcessJobSignals(driver, vm, job) < 0)
            goto cleanup;

        if (qemuMigrationUpdateJobStatus(driver, vm, job) < 0)
            goto cleanup;

        virDomainObjUnlock(vm);
        qemuDriverUnlock(driver);

        nanosleep(&ts, NULL);

        qemuDriverLock(driver);
        virDomainObjLock(vm);
    }

cleanup:
    if (priv->jobInfo.type == VIR_DOMAIN_JOB_COMPLETED)
        return 0;
    else
        return -1;
}


static int
qemuDomainMigrateGraphicsRelocate(struct qemud_driver *driver,
                                  virDomainObjPtr vm,
                                  qemuMigrationCookiePtr cookie)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret;

    if (!cookie)
        return 0;

    if (!cookie->graphics)
        return 0;

    /* QEMU doesn't support VNC relocation yet, so
     * skip it to avoid generating an error
     */
    if (cookie->graphics->type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE)
        return 0;

    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    ret = qemuMonitorGraphicsRelocate(priv->mon,
                                      cookie->graphics->type,
                                      cookie->hostname,
                                      cookie->graphics->port,
                                      cookie->graphics->tlsPort,
                                      cookie->graphics->tlsSubject);
    qemuDomainObjExitMonitorWithDriver(driver, vm);

    return ret;
}
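
/* The v3 migration protocol drives the functions below in five phases:
 *
 *   Begin   (source)      - qemuMigrationBegin
 *   Prepare (destination) - qemuMigrationPrepareTunnel / PrepareDirect
 *   Perform (source)      - qemuMigrationPerform
 *   Finish  (destination) - qemuMigrationFinish
 *   Confirm (source)      - qemuMigrationConfirm
 *
 * Each step passes a cookie to the next, carrying (so far) identity data
 * for sanity checks plus optional graphics relocation details. */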
char *qemuMigrationBegin(struct qemud_driver *driver,
                         virDomainObjPtr vm,
                         const char *xmlin,
                         char **cookieout,
                         int *cookieoutlen)
{
    char *rv = NULL;
    qemuMigrationCookiePtr mig = NULL;

    if (xmlin) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Passing XML for the target VM is not yet supported"));
        goto cleanup;
    }

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
        goto cleanup;
    }

    if (!qemuMigrationIsAllowed(vm->def))
        goto cleanup;

    if (!(mig = qemuMigrationEatCookie(vm, NULL, 0, 0)))
        goto cleanup;

    if (qemuMigrationBakeCookie(mig, driver, vm,
                                cookieout, cookieoutlen,
                                0) < 0)
        goto cleanup;

    rv = qemuDomainFormatXML(driver, vm,
                             VIR_DOMAIN_XML_SECURE |
                             VIR_DOMAIN_XML_UPDATE_CPU);

cleanup:
    virDomainObjUnlock(vm);
    qemuMigrationCookieFree(mig);
    return rv;
}
/* Prepare is the first step, and it runs on the destination host.
 *
 * This version starts an empty VM listening on a localhost TCP port, and
 * sets up the corresponding virStream to handle the incoming data.
 */
int
qemuMigrationPrepareTunnel(struct qemud_driver *driver,
                           virConnectPtr dconn,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           virStreamPtr st,
                           const char *dname,
                           const char *dom_xml)
{
    virDomainDefPtr def = NULL;
    virDomainObjPtr vm = NULL;
    virDomainEventPtr event = NULL;
    int ret = -1;
    int internalret;
    int dataFD[2] = { -1, -1 };
    qemuDomainObjPrivatePtr priv = NULL;
    struct timeval now;
    qemuMigrationCookiePtr mig = NULL;

    if (gettimeofday(&now, NULL) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot get time of day"));
        return -1;
    }

    /* Parse the domain XML. */
    if (!(def = virDomainDefParseString(driver->caps, dom_xml,
                                        VIR_DOMAIN_XML_INACTIVE)))
        goto cleanup;

    if (!qemuMigrationIsAllowed(def))
        goto cleanup;

    /* Target domain name, maybe renamed. */
    if (dname) {
        VIR_FREE(def->name);
        def->name = strdup(dname);
        if (def->name == NULL)
            goto cleanup;
    }

    if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
        goto cleanup;

    if (!(vm = virDomainAssignDef(driver->caps,
                                  &driver->domains,
                                  def, true))) {
        /* virDomainAssignDef already set the error */
        goto cleanup;
    }
    def = NULL;
    priv = vm->privateData;

    if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0)))
        goto cleanup;

    if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
        goto cleanup;
    priv->jobActive = QEMU_JOB_MIGRATION_OUT;

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    if (pipe(dataFD) < 0 ||
        virSetCloseExec(dataFD[1]) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot create pipe for tunnelled migration"));
        goto endjob;
    }

    /* Start the QEMU daemon, with the same command-line arguments plus
     * -incoming stdio (which qemu_command might convert to exec:cat or fd:n)
     */
    internalret = qemuProcessStart(dconn, driver, vm, "stdio", true,
                                   dataFD[0], NULL,
                                   VIR_VM_OP_MIGRATE_IN_START);
    if (internalret < 0) {
        qemuAuditDomainStart(vm, "migrated", false);
        /* Note that we don't set an error here because qemuProcessStart
         * should have already done that.
         */
        if (!vm->persistent) {
            virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        goto endjob;
    }

    if (virFDStreamOpen(st, dataFD[1]) < 0) {
        qemuAuditDomainStart(vm, "migrated", false);
        qemuProcessStop(driver, vm, 0, VIR_DOMAIN_SHUTOFF_FAILED);
        if (!vm->persistent) {
            if (qemuDomainObjEndJob(vm) > 0)
                virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        virReportSystemError(errno, "%s",
                             _("cannot pass pipe for tunnelled migration"));
        goto endjob;
    }
    dataFD[1] = -1; /* 'st' owns the FD now & will close it */

    qemuAuditDomainStart(vm, "migrated", true);

    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_STARTED,
                                     VIR_DOMAIN_EVENT_STARTED_MIGRATED);

    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
                                QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) {
        /* We could tear down the whole guest here, but
         * cookie data is (so far) non-critical, so that
         * seems a little harsh. We'll just warn for now.
         */
        VIR_WARN("Unable to encode migration cookie");
    }

    ret = 0;

endjob:
    if (vm &&
        qemuDomainObjEndJob(vm) == 0)
        vm = NULL;

    /* We set a fake job active which is held across
     * API calls until the finish() call. This prevents
     * any other APIs being invoked while incoming
     * migration is taking place
     */
    if (vm &&
        virDomainObjIsActive(vm)) {
        priv->jobActive = QEMU_JOB_MIGRATION_IN;
        priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
        priv->jobStart = timeval_to_ms(now);
    }

cleanup:
    virDomainDefFree(def);
    VIR_FORCE_CLOSE(dataFD[0]);
    VIR_FORCE_CLOSE(dataFD[1]);
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuMigrationCookieFree(mig);
    return ret;
}
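
/* This variant starts the incoming QEMU listening on a real TCP port.
 * Port numbers are handed out round-robin from a fixed pool starting at
 * QEMUD_MIGRATION_FIRST_PORT, so a NULL uri_in typically yields a URI
 * like "tcp:dst.example.com:49152" (illustrative value). */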
int
qemuMigrationPrepareDirect(struct qemud_driver *driver,
                           virConnectPtr dconn,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           const char *uri_in,
                           char **uri_out,
                           const char *dname,
                           const char *dom_xml)
{
    static int port = 0;
    virDomainDefPtr def = NULL;
    virDomainObjPtr vm = NULL;
    int this_port;
    char *hostname = NULL;
    char migrateFrom [64];
    const char *p;
    virDomainEventPtr event = NULL;
    int ret = -1;
    int internalret;
    qemuDomainObjPrivatePtr priv = NULL;
    struct timeval now;
    qemuMigrationCookiePtr mig = NULL;

    if (gettimeofday(&now, NULL) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot get time of day"));
        return -1;
    }

    /* The URI passed in may be NULL or a string "tcp://somehostname:port".
     *
     * If the URI passed in is NULL then we allocate a port number
     * from our pool of port numbers and return a URI of
     * "tcp://ourhostname:port".
     *
     * If the URI passed in is not NULL then we try to parse out the
     * port number and use that (note that the hostname is assumed
     * to be a correct hostname which refers to the target machine).
     */
    if (uri_in == NULL) {
        this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
        if (port == QEMUD_MIGRATION_NUM_PORTS)
            port = 0;

        /* Get hostname */
        if ((hostname = virGetHostname(NULL)) == NULL)
            goto cleanup;

        if (STRPREFIX(hostname, "localhost")) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("hostname on destination resolved to localhost, but migration requires an FQDN"));
            goto cleanup;
        }

        /* XXX this really should have been a properly well-formed
         * URI, but we can't add in tcp:// now without breaking
         * compatibility with old targets. We at least make the
         * new targets accept both syntaxes though.
         */
        /* Caller frees */
        internalret = virAsprintf(uri_out, "tcp:%s:%d", hostname, this_port);
        if (internalret < 0) {
            virReportOOMError();
            goto cleanup;
        }
    } else {
        /* Check the URI starts with "tcp:". We will escape the
         * URI when passing it to the qemu monitor, so bad
         * characters in hostname part don't matter.
         */
        if (!STRPREFIX(uri_in, "tcp:")) {
            qemuReportError(VIR_ERR_INVALID_ARG, "%s",
                            _("only tcp URIs are supported for KVM/QEMU migrations"));
            goto cleanup;
        }

        /* Get the port number. */
        p = strrchr(uri_in, ':');
        if (p == strchr(uri_in, ':')) {
            /* Generate a port */
            this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
            if (port == QEMUD_MIGRATION_NUM_PORTS)
                port = 0;

            /* Caller frees */
            if (virAsprintf(uri_out, "%s:%d", uri_in, this_port) < 0) {
                virReportOOMError();
                goto cleanup;
            }
        } else {
            p++; /* definitely has a ':' in it, see above */
            this_port = virParseNumber(&p);
            if (this_port == -1 || p-uri_in != strlen(uri_in)) {
                qemuReportError(VIR_ERR_INVALID_ARG,
                                "%s", _("URI ended with incorrect ':port'"));
                goto cleanup;
            }
        }
    }

    if (*uri_out)
        VIR_DEBUG("Generated uri_out=%s", *uri_out);

    /* Parse the domain XML. */
    if (!(def = virDomainDefParseString(driver->caps, dom_xml,
                                        VIR_DOMAIN_XML_INACTIVE)))
        goto cleanup;

    if (!qemuMigrationIsAllowed(def))
        goto cleanup;

    /* Target domain name, maybe renamed. */
    if (dname) {
        VIR_FREE(def->name);
        def->name = strdup(dname);
        if (def->name == NULL)
            goto cleanup;
    }

    if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
        goto cleanup;

    if (!(vm = virDomainAssignDef(driver->caps,
                                  &driver->domains,
                                  def, true))) {
        /* virDomainAssignDef already set the error */
        goto cleanup;
    }
    def = NULL;
    priv = vm->privateData;

    if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0)))
        goto cleanup;

    if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
        goto cleanup;
    priv->jobActive = QEMU_JOB_MIGRATION_OUT;

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    /* Start the QEMU daemon, with the same command-line arguments plus
     * -incoming tcp:0.0.0.0:port
     */
    snprintf(migrateFrom, sizeof(migrateFrom), "tcp:0.0.0.0:%d", this_port);
    if (qemuProcessStart(dconn, driver, vm, migrateFrom, true,
                         -1, NULL, VIR_VM_OP_MIGRATE_IN_START) < 0) {
        qemuAuditDomainStart(vm, "migrated", false);
        /* Note that we don't set an error here because qemuProcessStart
         * should have already done that.
         */
        if (!vm->persistent) {
            if (qemuDomainObjEndJob(vm) > 0)
                virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        goto endjob;
    }

    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
                                QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) {
        /* We could tear down the whole guest here, but
         * cookie data is (so far) non-critical, so that
         * seems a little harsh. We'll just warn for now.
         */
        VIR_WARN("Unable to encode migration cookie");
    }

    qemuAuditDomainStart(vm, "migrated", true);
    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_STARTED,
                                     VIR_DOMAIN_EVENT_STARTED_MIGRATED);
    ret = 0;

endjob:
    if (vm &&
        qemuDomainObjEndJob(vm) == 0)
        vm = NULL;

    /* We set a fake job active which is held across
     * API calls until the finish() call. This prevents
     * any other APIs being invoked while incoming
     * migration is taking place
     */
    if (vm &&
        virDomainObjIsActive(vm)) {
        priv->jobActive = QEMU_JOB_MIGRATION_IN;
        priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
        priv->jobStart = timeval_to_ms(now);
    }

cleanup:
    VIR_FREE(hostname);
    virDomainDefFree(def);
    if (ret != 0)
        VIR_FREE(*uri_out);
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuMigrationCookieFree(mig);
    return ret;
}


/* Perform migration using QEMU's native TCP migrate support,
 * not encrypted obviously
 */
static int doNativeMigrate(struct qemud_driver *driver,
                           virDomainObjPtr vm,
                           const char *uri,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           unsigned int flags,
                           const char *dname ATTRIBUTE_UNUSED,
                           unsigned long resource)
{
    int ret = -1;
    xmlURIPtr uribits = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
    qemuMigrationCookiePtr mig = NULL;

    if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_GRAPHICS)))
        goto cleanup;

    if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig) < 0)
        VIR_WARN("unable to provide data for graphics client relocation");

    /* Issue the migrate command. */
    if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://")) {
        /* HACK: source host generates bogus URIs, so fix them up */
        char *tmpuri;
        if (virAsprintf(&tmpuri, "tcp://%s", uri + strlen("tcp:")) < 0) {
            virReportOOMError();
            goto cleanup;
        }
        uribits = xmlParseURI(tmpuri);
        VIR_FREE(tmpuri);
    } else {
        uribits = xmlParseURI(uri);
    }
    if (!uribits) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("cannot parse URI %s"), uri);
        goto cleanup;
    }

    /* Before EnterMonitor, since qemuProcessStopCPUs already does that */
    if (!(flags & VIR_MIGRATE_LIVE) &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        if (qemuMigrationSetOffline(driver, vm) < 0)
            goto cleanup;
    }

    qemuDomainObjEnterMonitorWithDriver(driver, vm);

    if (resource > 0 &&
        qemuMonitorSetMigrationSpeed(priv->mon, resource) < 0) {
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_NON_SHARED_DISK)
        background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;

    if (flags & VIR_MIGRATE_NON_SHARED_INC)
        background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;

    if (qemuMonitorMigrateToHost(priv->mon, background_flags, uribits->server,
                                 uribits->port) < 0) {
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        goto cleanup;
    }
    qemuDomainObjExitMonitorWithDriver(driver, vm);

    if (qemuMigrationWaitForCompletion(driver, vm) < 0)
        goto cleanup;

    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0)
        VIR_WARN("Unable to encode migration cookie");

    ret = 0;

cleanup:
    qemuMigrationCookieFree(mig);
    xmlFreeURI(uribits);
    return ret;
}


#define TUNNEL_SEND_BUF_SIZE 65536

typedef struct _qemuMigrationIOThread qemuMigrationIOThread;
typedef qemuMigrationIOThread *qemuMigrationIOThreadPtr;
struct _qemuMigrationIOThread {
    virThread thread;
    virStreamPtr st;
    int sock;
    virError err;
};
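
/* The IO thread pumps data for tunnelled migration: QEMU writes the
 * migration stream into a UNIX socket that we listen on, and
 * qemuMigrationIOFunc reads from that socket in TUNNEL_SEND_BUF_SIZE
 * chunks, forwarding each chunk over the virStream connected to the
 * destination libvirtd. */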
static void qemuMigrationIOFunc(void *arg)
{
    qemuMigrationIOThreadPtr data = arg;
    char *buffer;
    int nbytes = TUNNEL_SEND_BUF_SIZE;

    if (VIR_ALLOC_N(buffer, TUNNEL_SEND_BUF_SIZE) < 0) {
        virReportOOMError();
        virStreamAbort(data->st);
        goto error;
    }

    for (;;) {
        nbytes = saferead(data->sock, buffer, TUNNEL_SEND_BUF_SIZE);
        if (nbytes < 0) {
            virReportSystemError(errno, "%s",
                                 _("tunnelled migration failed to read from qemu"));
            virStreamAbort(data->st);
            VIR_FREE(buffer);
            goto error;
        } else if (nbytes == 0) {
            /* EOF; get out of here */
            break;
        }

        if (virStreamSend(data->st, buffer, nbytes) < 0) {
            VIR_FREE(buffer);
            goto error;
        }
    }

    VIR_FREE(buffer);

    if (virStreamFinish(data->st) < 0)
        goto error;

    return;

error:
    virCopyLastError(&data->err);
    virResetLastError();
}


static qemuMigrationIOThreadPtr
qemuMigrationStartTunnel(virStreamPtr st,
                         int sock)
{
    qemuMigrationIOThreadPtr io;

    if (VIR_ALLOC(io) < 0) {
        virReportOOMError();
        return NULL;
    }

    io->st = st;
    io->sock = sock;

    if (virThreadCreate(&io->thread, true,
                        qemuMigrationIOFunc,
                        io) < 0) {
        virReportSystemError(errno, "%s",
                             _("Unable to create migration thread"));
        VIR_FREE(io);
        return NULL;
    }

    return io;
}

static int
qemuMigrationStopTunnel(qemuMigrationIOThreadPtr io)
{
    int rv = -1;
    virThreadJoin(&io->thread);

    /* Forward error from the IO thread, to this thread */
    if (io->err.code != VIR_ERR_OK) {
        virSetError(&io->err);
        virResetError(&io->err);
        goto cleanup;
    }

    rv = 0;

cleanup:
    VIR_FREE(io);
    return rv;
}
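
/* Outline of a tunnelled migration on the source side:
 *
 *  - create & listen on a UNIX socket under driver->libDir
 *  - tell QEMU to migrate to that socket (unix: or exec:nc -U)
 *  - accept QEMU's connection and start the IO thread feeding the
 *    data into @st
 *  - poll for completion, then join the IO thread
 *
 * Any failure after the monitor command has been issued must go through
 * the 'cancel' path so the migration is also aborted inside QEMU. */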
static int doTunnelMigrate(struct qemud_driver *driver,
                           virDomainObjPtr vm,
                           virStreamPtr st,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           unsigned long flags,
                           unsigned long resource)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int client_sock = -1;
    int qemu_sock = -1;
    struct sockaddr_un sa_qemu, sa_client;
    socklen_t addrlen;
    int status;
    unsigned long long transferred, remaining, total;
    char *unixfile = NULL;
    unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
    int ret = -1;
    qemuMigrationCookiePtr mig = NULL;
    qemuMigrationIOThreadPtr iothread = NULL;

    if (!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX) &&
        !qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) {
        qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
                        _("Source qemu is too old to support tunnelled migration"));
        goto cleanup;
    }

    if (virAsprintf(&unixfile, "%s/qemu.tunnelmigrate.src.%s",
                    driver->libDir, vm->def->name) < 0) {
        virReportOOMError();
        goto cleanup;
    }

    qemu_sock = socket(AF_UNIX, SOCK_STREAM, 0);
    if (qemu_sock < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot open tunnelled migration socket"));
        goto cleanup;
    }
    memset(&sa_qemu, 0, sizeof(sa_qemu));
    sa_qemu.sun_family = AF_UNIX;
    if (virStrcpy(sa_qemu.sun_path, unixfile,
                  sizeof(sa_qemu.sun_path)) == NULL) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("Unix socket '%s' too big for destination"),
                        unixfile);
        goto cleanup;
    }
    unlink(unixfile);
    if (bind(qemu_sock, (struct sockaddr *)&sa_qemu, sizeof(sa_qemu)) < 0) {
        virReportSystemError(errno,
                             _("Cannot bind to unix socket '%s' for tunnelled migration"),
                             unixfile);
        goto cleanup;
    }
    if (listen(qemu_sock, 1) < 0) {
        virReportSystemError(errno,
                             _("Cannot listen on unix socket '%s' for tunnelled migration"),
                             unixfile);
        goto cleanup;
    }

    if (chown(unixfile, driver->user, driver->group) < 0) {
        virReportSystemError(errno,
                             _("Cannot change unix socket '%s' owner"),
                             unixfile);
        goto cleanup;
    }

    /* the domain may have shutdown or crashed while we had the locks dropped
     * in qemuDomainObjEnterRemoteWithDriver, so check again
     */
    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("guest unexpectedly quit"));
        goto cleanup;
    }

    if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_GRAPHICS)))
        goto cleanup;

    if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig) < 0)
        VIR_WARN("unable to provide data for graphics client relocation");

    /* 3. start migration on source */
    /* Before EnterMonitor, since qemuProcessStopCPUs already does that */
    if (!(flags & VIR_MIGRATE_LIVE) &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        if (qemuMigrationSetOffline(driver, vm) < 0)
            goto cleanup;
    }

    qemuDomainObjEnterMonitorWithDriver(driver, vm);

    if (resource > 0 &&
        qemuMonitorSetMigrationSpeed(priv->mon, resource) < 0) {
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_NON_SHARED_DISK)
        background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
    if (flags & VIR_MIGRATE_NON_SHARED_INC)
        background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;

    if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX)) {
        ret = qemuMonitorMigrateToUnix(priv->mon, background_flags,
                                       unixfile);
    } else if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) {
        const char *args[] = { "nc", "-U", unixfile, NULL };
        ret = qemuMonitorMigrateToCommand(priv->mon, QEMU_MONITOR_MIGRATE_BACKGROUND, args);
    } else {
        ret = -1;
    }
    qemuDomainObjExitMonitorWithDriver(driver, vm);

    if (ret < 0) {
        qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
                        _("tunnelled migration monitor command failed"));
        goto cleanup;
    }
    ret = -1;

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("guest unexpectedly quit"));
        goto cleanup;
    }

    /* From this point onwards we *must* call cancel to abort the
     * migration on source if anything goes wrong */

    /* it is also possible that the migrate didn't fail initially, but
     * rather failed later on.  Check the output of "info migrate"
     */
    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    if (qemuMonitorGetMigrationStatus(priv->mon,
                                      &status,
                                      &transferred,
                                      &remaining,
                                      &total) < 0) {
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        goto cancel;
    }
    qemuDomainObjExitMonitorWithDriver(driver, vm);

    if (status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        "%s", _("migrate failed"));
        goto cancel;
    }

    addrlen = sizeof(sa_client);
    while ((client_sock = accept(qemu_sock, (struct sockaddr *)&sa_client, &addrlen)) < 0) {
        if (errno == EAGAIN || errno == EINTR)
            continue;
        virReportSystemError(errno, "%s",
                             _("tunnelled migration failed to accept from qemu"));
        goto cancel;
    }

    if (!(iothread = qemuMigrationStartTunnel(st, client_sock)))
        goto cancel;

    ret = qemuMigrationWaitForCompletion(driver, vm);

    /* Close now to ensure the IO thread quits & is joinable in next method */
    VIR_FORCE_CLOSE(client_sock);

    if (qemuMigrationStopTunnel(iothread) < 0)
        ret = -1;

    if (ret == 0 &&
        qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0)
        VIR_WARN("Unable to encode migration cookie");

cancel:
    if (ret != 0 && virDomainObjIsActive(vm)) {
        VIR_FORCE_CLOSE(client_sock);
        VIR_FORCE_CLOSE(qemu_sock);
        qemuDomainObjEnterMonitorWithDriver(driver, vm);
        qemuMonitorMigrateCancel(priv->mon);
        qemuDomainObjExitMonitorWithDriver(driver, vm);
    }

cleanup:
    qemuMigrationCookieFree(mig);
    VIR_FORCE_CLOSE(client_sock);
    VIR_FORCE_CLOSE(qemu_sock);
    if (unixfile) {
        unlink(unixfile);
        VIR_FREE(unixfile);
    }

    return ret;
}
/* This is essentially a re-impl of virDomainMigrateVersion2
 * from libvirt.c, but running in source libvirtd context,
 * instead of client app context & also adding in tunnel
 * handling
 */
static int doPeer2PeerMigrate2(struct qemud_driver *driver,
                               virConnectPtr sconn,
                               virConnectPtr dconn,
                               virDomainObjPtr vm,
                               const char *dconnuri,
                               unsigned long flags,
                               const char *dname,
                               unsigned long resource)
{
    virDomainPtr ddomain = NULL;
    char *uri_out = NULL;
    char *cookie = NULL;
    char *dom_xml = NULL;
    int cookielen = 0, ret;
    virErrorPtr orig_err = NULL;
    int cancelled;
    virStreamPtr st = NULL;

    /* In version 2 of the protocol, the prepare step is slightly
     * different.  We fetch the domain XML of the source domain
     * and pass it to Prepare2.
     */
    if (!(dom_xml = qemuDomainFormatXML(driver, vm,
                                        VIR_DOMAIN_XML_SECURE |
                                        VIR_DOMAIN_XML_UPDATE_CPU)))
        return -1;

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
        flags |= VIR_MIGRATE_PAUSED;

    VIR_DEBUG("Prepare2 %p", dconn);
    if (flags & VIR_MIGRATE_TUNNELLED) {
        /*
         * Tunnelled Migrate Version 2 does not support cookies
         * due to missing parameters in the prepareTunnel() API.
         */

        if (!(st = virStreamNew(dconn, 0)))
            goto cleanup;

        qemuDomainObjEnterRemoteWithDriver(driver, vm);
        ret = dconn->driver->domainMigratePrepareTunnel
            (dconn, st, flags, dname, resource, dom_xml);
        qemuDomainObjExitRemoteWithDriver(driver, vm);
    } else {
        qemuDomainObjEnterRemoteWithDriver(driver, vm);
        ret = dconn->driver->domainMigratePrepare2
            (dconn, &cookie, &cookielen, NULL, &uri_out,
             flags, dname, resource, dom_xml);
        qemuDomainObjExitRemoteWithDriver(driver, vm);
    }
    VIR_FREE(dom_xml);
    if (ret == -1)
        goto cleanup;

    /* the domain may have shutdown or crashed while we had the locks dropped
     * in qemuDomainObjEnterRemoteWithDriver, so check again
     */
    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("guest unexpectedly quit"));
        goto cleanup;
    }

    if (!(flags & VIR_MIGRATE_TUNNELLED) &&
        (uri_out == NULL)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("domainMigratePrepare2 did not set uri"));
        cancelled = 1;
        goto finish;
    }

    /* Perform the migration.  The driver isn't supposed to return
     * until the migration is complete.
     */
    VIR_DEBUG("Perform %p", sconn);
    if (flags & VIR_MIGRATE_TUNNELLED)
        ret = doTunnelMigrate(driver, vm, st,
                              NULL, 0, NULL, NULL,
                              flags, resource);
    else
        ret = doNativeMigrate(driver, vm, uri_out,
                              cookie, cookielen,
                              NULL, NULL, /* No out cookie with v2 migration */
                              flags, dname, resource);

    /* Perform failed. Make sure Finish doesn't overwrite the error */
    if (ret < 0)
        orig_err = virSaveLastError();

    /* If Perform returns < 0, then we need to cancel the VM
     * startup on the destination
     */
    cancelled = ret < 0 ? 1 : 0;

finish:
    /* In version 2 of the migration protocol, we pass the
     * status code from the sender to the destination host,
     * so it can do any cleanup if the migration failed.
     */
    dname = dname ? dname : vm->def->name;
    VIR_DEBUG("Finish2 %p ret=%d", dconn, ret);
    qemuDomainObjEnterRemoteWithDriver(driver, vm);
    ddomain = dconn->driver->domainMigrateFinish2
        (dconn, dname, cookie, cookielen,
         uri_out ? uri_out : dconnuri, flags, cancelled);
    qemuDomainObjExitRemoteWithDriver(driver, vm);

cleanup:
    if (ddomain) {
        virUnrefDomain(ddomain);
        ret = 0;
    } else {
        ret = -1;
    }

    if (st)
        virUnrefStream(st);

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

    VIR_FREE(uri_out);
    VIR_FREE(cookie);

    return ret;
}
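
/* Compared to v2 above, v3 splits out explicit Begin and Confirm phases
 * and threads a cookie through every step, so the two sides can exchange
 * identity and graphics-relocation data, and the source always learns
 * whether it is safe to resume the guest or must kill it. */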
/* This is essentially a re-impl of virDomainMigrateVersion3
 * from libvirt.c, but running in source libvirtd context,
 * instead of client app context & also adding in tunnel
 * handling
 */
static int doPeer2PeerMigrate3(struct qemud_driver *driver,
                               virConnectPtr sconn,
                               virConnectPtr dconn,
                               virDomainObjPtr vm,
                               const char *xmlin,
                               const char *dconnuri,
                               const char *uri,
                               unsigned long flags,
                               const char *dname,
                               unsigned long resource)
{
    virDomainPtr ddomain = NULL;
    char *uri_out = NULL;
    char *cookiein = NULL;
    char *cookieout = NULL;
    char *dom_xml = NULL;
    int cookieinlen = 0;
    int cookieoutlen = 0;
    int ret = -1;
    virErrorPtr orig_err = NULL;
    int cancelled;
    virStreamPtr st = NULL;

    VIR_DEBUG("Begin3 %p", sconn);
    dom_xml = qemuMigrationBegin(driver, vm, xmlin,
                                 &cookieout, &cookieoutlen);
    if (!dom_xml)
        goto cleanup;

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
        flags |= VIR_MIGRATE_PAUSED;

    VIR_DEBUG("Prepare3 %p", dconn);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;
    if (flags & VIR_MIGRATE_TUNNELLED) {
        if (!(st = virStreamNew(dconn, 0)))
            goto cleanup;

        qemuDomainObjEnterRemoteWithDriver(driver, vm);
        ret = dconn->driver->domainMigratePrepareTunnel3
            (dconn, st, cookiein, cookieinlen,
             &cookieout, &cookieoutlen,
             flags, dname, resource, dom_xml);
        qemuDomainObjExitRemoteWithDriver(driver, vm);
    } else {
        qemuDomainObjEnterRemoteWithDriver(driver, vm);
        ret = dconn->driver->domainMigratePrepare3
            (dconn, cookiein, cookieinlen, &cookieout, &cookieoutlen,
             uri, &uri_out, flags, dname, resource, dom_xml);
        qemuDomainObjExitRemoteWithDriver(driver, vm);
    }
    VIR_FREE(dom_xml);
    if (ret == -1)
        goto cleanup;

    if (!(flags & VIR_MIGRATE_TUNNELLED) &&
        (uri_out == NULL)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("domainMigratePrepare3 did not set uri"));
        cancelled = 1;
        goto finish;
    }

    /* Perform the migration.  The driver isn't supposed to return
     * until the migration is complete. The src VM should remain
     * running, but in paused state until the destination can
     * confirm migration completion.
     */
    VIR_DEBUG("Perform3 %p uri=%s uri_out=%s", sconn, uri, uri_out);
    VIR_FREE(cookiein);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;
    if (flags & VIR_MIGRATE_TUNNELLED)
        ret = doTunnelMigrate(driver, vm, st,
                              cookiein, cookieinlen,
                              &cookieout, &cookieoutlen,
                              flags, resource);
    else
        ret = doNativeMigrate(driver, vm, uri_out,
                              cookiein, cookieinlen,
                              &cookieout, &cookieoutlen,
                              flags, dname, resource);

    /* Perform failed. Make sure Finish doesn't overwrite the error */
    if (ret < 0)
        orig_err = virSaveLastError();

    /* If Perform returns < 0, then we need to cancel the VM
     * startup on the destination
     */
    cancelled = ret < 0 ? 1 : 0;

finish:
    /*
     * The status code from the source is passed to the destination.
     * The dest can cleanup if the source indicated it failed to
     * send all migration data. Returns NULL for ddomain if
     * the dest was unable to complete migration.
     */
    VIR_DEBUG("Finish3 %p ret=%d", dconn, ret);
    VIR_FREE(cookiein);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;
    dname = dname ? dname : vm->def->name;
    qemuDomainObjEnterRemoteWithDriver(driver, vm);
    ret = dconn->driver->domainMigrateFinish3
        (dconn, dname, cookiein, cookieinlen, &cookieout, &cookieoutlen,
         dconnuri, uri_out ? uri_out : uri, flags, cancelled, &ddomain);
    qemuDomainObjExitRemoteWithDriver(driver, vm);

    /* If ret is 0 then 'ddomain' indicates whether the VM is
     * running on the dest. If not running, we can restart
     * the source. If ret is -1, we can't be sure what happened
     * to the VM on the dest, thus the only safe option is to
     * kill the VM on the source, even though that may leave
     * no VM at all on either host.
     */
    cancelled = ret == 0 && ddomain == NULL ? 1 : 0;

    /*
     * If cancelled, then src VM will be restarted, else
     * it will be killed
     */
    VIR_DEBUG("Confirm3 %p ret=%d vm=%p", sconn, ret, vm);
    VIR_FREE(cookiein);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;
    ret = qemuMigrationConfirm(driver, sconn, vm,
                               cookiein, cookieinlen,
                               flags, cancelled);
    /* If Confirm3 returns -1, there's nothing more we can
     * do, but fortunately worst case is that there is a
     * domain left in 'paused' state on source.
     */

cleanup:
    if (ddomain) {
        virUnrefDomain(ddomain);
        ret = 0;
    } else {
        ret = -1;
    }

    if (st)
        virUnrefStream(st);

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

    VIR_FREE(uri_out);
    VIR_FREE(cookiein);
    VIR_FREE(cookieout);

    return ret;
}


static int doPeer2PeerMigrate(struct qemud_driver *driver,
                              virConnectPtr sconn,
                              virDomainObjPtr vm,
                              const char *xmlin,
                              const char *dconnuri,
                              const char *uri,
                              unsigned long flags,
                              const char *dname,
                              unsigned long resource)
{
    int ret = -1;
    virConnectPtr dconn = NULL;
    bool p2p;
    bool v3;

    /* the order of operations is important here; we make sure the
     * destination side is completely setup before we touch the source
     */

    qemuDomainObjEnterRemoteWithDriver(driver, vm);
    dconn = virConnectOpen(dconnuri);
    qemuDomainObjExitRemoteWithDriver(driver, vm);
    if (dconn == NULL) {
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        _("Failed to connect to remote libvirt URI %s"), dconnuri);
        return -1;
    }

    qemuDomainObjEnterRemoteWithDriver(driver, vm);
    p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                   VIR_DRV_FEATURE_MIGRATION_P2P);
    v3 = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                  VIR_DRV_FEATURE_MIGRATION_V3);
    qemuDomainObjExitRemoteWithDriver(driver, vm);

    if (!p2p) {
        qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
                        _("Destination libvirt does not support peer-to-peer migration protocol"));
        goto cleanup;
    }

    /* domain may have been stopped while we were talking to remote daemon */
    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("guest unexpectedly quit"));
        goto cleanup;
    }

    if (v3)
        ret = doPeer2PeerMigrate3(driver, sconn, dconn, vm, xmlin,
                                  dconnuri, uri, flags, dname, resource);
    else
        ret = doPeer2PeerMigrate2(driver, sconn, dconn, vm,
                                  dconnuri, flags, dname, resource);

cleanup:
    /* don't call virConnectClose(), because that resets any pending errors */
    qemuDomainObjEnterRemoteWithDriver(driver, vm);
    virUnrefConnect(dconn);
    qemuDomainObjExitRemoteWithDriver(driver, vm);

    return ret;
}
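
/* Perform phase, run on the source. With the peer2peer/tunnelled flags
 * this drives the whole protocol against the destination libvirtd
 * itself; otherwise it just issues the native migrate command. When
 * killOnFinish is set (the pre-v3 protocols) the source VM is torn down
 * here instead of waiting for a separate Confirm step. */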
int qemuMigrationPerform(struct qemud_driver *driver,
                         virConnectPtr conn,
                         virDomainObjPtr vm,
                         const char *xmlin,
                         const char *dconnuri,
                         const char *uri,
                         const char *cookiein,
                         int cookieinlen,
                         char **cookieout,
                         int *cookieoutlen,
                         unsigned long flags,
                         const char *dname,
                         unsigned long resource,
                         bool killOnFinish)
{
    virDomainEventPtr event = NULL;
    int ret = -1;
    int resume = 0;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
        goto cleanup;

    priv->jobActive = QEMU_JOB_MIGRATION_OUT;

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
        goto endjob;
    }

    memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
    priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;

    resume = virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING;

    if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
        if (cookieinlen) {
            qemuReportError(VIR_ERR_OPERATION_INVALID,
                            "%s", _("received unexpected cookie with P2P migration"));
            goto endjob;
        }

        if (doPeer2PeerMigrate(driver, conn, vm, xmlin,
                               dconnuri, uri, flags, dname,
                               resource) < 0)
            /* doPeer2PeerMigrate already set the error, so just get out */
            goto endjob;
    } else {
        if (dconnuri) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            "%s", _("Unexpected dconnuri parameter with non-peer2peer migration"));
            goto endjob;
        }
        if (doNativeMigrate(driver, vm, uri, cookiein, cookieinlen,
                            cookieout, cookieoutlen,
                            flags, dname, resource) < 0)
            goto endjob;
    }

    /* Clean up the source domain. */
    if (killOnFinish) {
        qemuProcessStop(driver, vm, 1, VIR_DOMAIN_SHUTOFF_MIGRATED);
        qemuAuditDomainStop(vm, "migrated");
        resume = 0;

        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
    }
    ret = 0;

endjob:
    if (resume && virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
        /* we got here through some sort of failure; start the domain again */
        if (qemuProcessStartCPUs(driver, vm, conn,
                                 VIR_DOMAIN_RUNNING_MIGRATION_CANCELED) < 0) {
            /* Hm, we already know we are in error here.  We don't want to
             * overwrite the previous error, though, so we just throw something
             * to the logs and hope for the best
             */
            VIR_ERROR(_("Failed to resume guest %s after failure"),
                      vm->def->name);
        }

        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_RESUMED,
                                         VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
    }
    if (vm) {
        if (qemuDomainObjEndJob(vm) == 0) {
            vm = NULL;
        } else if (!virDomainObjIsActive(vm) &&
                   (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) {
            if (flags & VIR_MIGRATE_UNDEFINE_SOURCE)
                virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm);
            virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
    }

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    return ret;
}


#if WITH_MACVTAP
static void
qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def)
{
    int i;
    int last_good_net = -1;
    virDomainNetDefPtr net;

    for (i = 0; i < def->nnets; i++) {
        net = def->nets[i];
        if (net->type == VIR_DOMAIN_NET_TYPE_DIRECT) {
            if (vpAssociatePortProfileId(net->ifname,
                                         net->mac,
                                         net->data.direct.linkdev,
                                         &net->data.direct.virtPortProfile,
                                         def->uuid,
                                         VIR_VM_OP_MIGRATE_IN_FINISH) != 0)
                goto err_exit;
        }
        last_good_net = i;
    }

    return;

err_exit:
    for (i = 0; i < last_good_net; i++) {
        net = def->nets[i];
        if (net->type == VIR_DOMAIN_NET_TYPE_DIRECT) {
            vpDisassociatePortProfileId(net->ifname,
                                        net->mac,
                                        net->data.direct.linkdev,
                                        &net->data.direct.virtPortProfile,
                                        VIR_VM_OP_MIGRATE_IN_FINISH);
        }
    }
}
#else /* !WITH_MACVTAP */
static void
qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def ATTRIBUTE_UNUSED) { }
#endif /* WITH_MACVTAP */
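
/* Finish phase, run on the destination with the retcode of the Perform
 * phase: on success the incoming guest is resumed (and optionally made
 * persistent), on failure the half-started QEMU process is cleaned up.
 * Returns the new virDomainPtr, or NULL if the guest is not usable. */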
virDomainPtr
qemuMigrationFinish(struct qemud_driver *driver,
                    virConnectPtr dconn,
                    virDomainObjPtr vm,
                    const char *cookiein,
                    int cookieinlen,
                    char **cookieout,
                    int *cookieoutlen,
                    unsigned long flags,
                    int retcode)
{
    virDomainPtr dom = NULL;
    virDomainEventPtr event = NULL;
    int newVM = 1;
    qemuDomainObjPrivatePtr priv = NULL;
    qemuMigrationCookiePtr mig = NULL;

    priv = vm->privateData;
    if (priv->jobActive != QEMU_JOB_MIGRATION_IN) {
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("domain '%s' is not processing incoming migration"), vm->def->name);
        goto cleanup;
    }
    priv->jobActive = QEMU_JOB_NONE;
    memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));

    if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0)))
        goto cleanup;

    if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
        goto cleanup;

    /* Did the migration go as planned?  If yes, return the domain
     * object, but if no, clean up the empty qemu process.
     */
    if (retcode == 0) {
        if (!virDomainObjIsActive(vm)) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("guest unexpectedly quit"));
            goto cleanup;
        }

        qemuMigrationVPAssociatePortProfiles(vm->def);

        if (flags & VIR_MIGRATE_PERSIST_DEST) {
            if (vm->persistent)
                newVM = 0;
            vm->persistent = 1;

            if (virDomainSaveConfig(driver->configDir, vm->def) < 0) {
                /* Hmpf.  Migration was successful, but making it persistent
                 * was not.  If we report successful, then when this domain
                 * shuts down, management tools are in for a surprise.  On the
                 * other hand, if we report failure, then the management tools
                 * might try to restart the domain on the source side, even
                 * though the domain is actually running on the destination.
                 * Return a NULL dom pointer, and hope that this is a rare
                 * situation and management tools are smart.
                 */
                vm = NULL;
                goto endjob;
            }

            event = virDomainEventNewFromObj(vm,
                                             VIR_DOMAIN_EVENT_DEFINED,
                                             newVM ?
                                             VIR_DOMAIN_EVENT_DEFINED_ADDED :
                                             VIR_DOMAIN_EVENT_DEFINED_UPDATED);
            if (event)
                qemuDomainEventQueue(driver, event);
            event = NULL;
        }
        dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);

        if (!(flags & VIR_MIGRATE_PAUSED)) {
            /* run 'cont' on the destination, which allows migration on qemu
             * >= 0.10.6 to work properly.  This isn't strictly necessary on
             * older qemu's, but it also doesn't hurt anything there
             */
            if (qemuProcessStartCPUs(driver, vm, dconn,
                                     VIR_DOMAIN_RUNNING_MIGRATED) < 0) {
                if (virGetLastError() == NULL)
                    qemuReportError(VIR_ERR_INTERNAL_ERROR,
                                    "%s", _("resume operation failed"));
                goto endjob;
            }
        }

        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_RESUMED,
                                         VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
        if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
            virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER);
            qemuDomainEventQueue(driver, event);
            event = virDomainEventNewFromObj(vm,
                                             VIR_DOMAIN_EVENT_SUSPENDED,
                                             VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
        }
        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
            VIR_WARN("Failed to save status on vm %s", vm->def->name);
            goto endjob;
        }
    } else {
        qemuProcessStop(driver, vm, 1, VIR_DOMAIN_SHUTOFF_FAILED);
        qemuAuditDomainStop(vm, "failed");
        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_FAILED);
        if (!vm->persistent) {
            if (qemuDomainObjEndJob(vm) > 0)
                virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
    }

    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0)
        VIR_WARN("Unable to encode migration cookie");

endjob:
    if (vm &&
        qemuDomainObjEndJob(vm) == 0)
        vm = NULL;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuMigrationCookieFree(mig);
    return dom;
}
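
/* Confirm phase, run on the source with the outcome of Finish: if the
 * destination took over the guest (retcode == 0) the source QEMU is
 * killed off, otherwise the source CPUs are restarted so the guest
 * keeps running here. */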
int qemuMigrationConfirm(struct qemud_driver *driver,
                         virConnectPtr conn,
                         virDomainObjPtr vm,
                         const char *cookiein,
                         int cookieinlen,
                         unsigned int flags ATTRIBUTE_UNUSED,
                         int retcode)
{
    qemuMigrationCookiePtr mig;
    virDomainEventPtr event = NULL;
    int rv = -1;

    if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0)))
        return -1;

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("guest unexpectedly quit"));
        goto cleanup;
    }

    /* Did the migration go as planned?  If yes, kill off the
     * domain object, but if no, resume CPUs
     */
    if (retcode == 0) {
        qemuProcessStop(driver, vm, 1, VIR_DOMAIN_SHUTOFF_MIGRATED);
        qemuAuditDomainStop(vm, "migrated");

        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
    } else {

        /* Migration failed, so the guest is still present here;
         * resume its CPUs on the source host.
         */
        if (qemuProcessStartCPUs(driver, vm, conn,
                                 VIR_DOMAIN_RUNNING_MIGRATED) < 0) {
            if (virGetLastError() == NULL)
                qemuReportError(VIR_ERR_INTERNAL_ERROR,
                                "%s", _("resume operation failed"));
            goto cleanup;
        }

        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_RESUMED,
                                         VIR_DOMAIN_EVENT_RESUMED_MIGRATED);

        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
            VIR_WARN("Failed to save status on vm %s", vm->def->name);
            goto cleanup;
        }
    }

    rv = 0;

cleanup:
    qemuMigrationCookieFree(mig);
    if (event)
        qemuDomainEventQueue(driver, event);
    return rv;
}
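
/* qemuMigrationToFile reuses the migration machinery to stream guest
 * state to an open file descriptor, for save-to-file and core dumps.
 * If @compressor is non-NULL it names a filter program (e.g. "gzip")
 * that is spliced in between QEMU and @fd via a pipe. */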
/* Helper function called while driver lock is held and vm is active. */
int
qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr vm,
                    int fd, off_t offset, const char *path,
                    const char *compressor,
                    bool is_reg, bool bypassSecurityDriver)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virCgroupPtr cgroup = NULL;
    int ret = -1;
    int rc;
    bool restoreLabel = false;
    virCommandPtr cmd = NULL;
    int pipeFD[2] = { -1, -1 };

    if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD) &&
        (!compressor || pipe(pipeFD) == 0)) {
        /* All right! We can use fd migration, which means that qemu
         * doesn't have to open() the file, so while we still have to
         * grant SELinux access, we can do it on fd and avoid cleanup
         * later, as well as skip futzing with cgroup.  */
        if (virSecurityManagerSetFDLabel(driver->securityManager, vm,
                                         compressor ? pipeFD[1] : fd) < 0)
            goto cleanup;
        bypassSecurityDriver = true;
    } else {
        /* Phooey - we have to fall back on exec migration, where qemu
         * has to popen() the file by name.  We might also stumble on
         * a race present in some qemu versions where it does a wait()
         * that botches pclose.  */
        if (!is_reg &&
            qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_DEVICES)) {
            if (virCgroupForDomain(driver->cgroup, vm->def->name,
                                   &cgroup, 0) != 0) {
                qemuReportError(VIR_ERR_INTERNAL_ERROR,
                                _("Unable to find cgroup for %s"),
                                vm->def->name);
                goto cleanup;
            }
            rc = virCgroupAllowDevicePath(cgroup, path,
                                          VIR_CGROUP_DEVICE_RW);
            qemuAuditCgroupPath(vm, cgroup, "allow", path, "rw", rc);
            if (rc < 0) {
                virReportSystemError(-rc,
                                     _("Unable to allow device %s for %s"),
                                     path, vm->def->name);
                goto cleanup;
            }
        }
        if ((!bypassSecurityDriver) &&
            virSecurityManagerSetSavedStateLabel(driver->securityManager,
                                                 vm, path) < 0)
            goto cleanup;
        restoreLabel = true;
    }

    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    if (!compressor) {
        const char *args[] = { "cat", NULL };

        if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD) &&
            priv->monConfig->type == VIR_DOMAIN_CHR_TYPE_UNIX) {
            rc = qemuMonitorMigrateToFd(priv->mon,
                                        QEMU_MONITOR_MIGRATE_BACKGROUND,
                                        fd);
        } else {
            rc = qemuMonitorMigrateToFile(priv->mon,
                                          QEMU_MONITOR_MIGRATE_BACKGROUND,
                                          args, path, offset);
        }
    } else {
        const char *prog = compressor;
        const char *args[] = {
            prog,
            "-c",
            NULL
        };
        if (pipeFD[0] != -1) {
            cmd = virCommandNewArgs(args);
            virCommandSetInputFD(cmd, pipeFD[0]);
            virCommandSetOutputFD(cmd, &fd);
            if (virSetCloseExec(pipeFD[1]) < 0) {
                virReportSystemError(errno, "%s",
                                     _("Unable to set cloexec flag"));
                qemuDomainObjExitMonitorWithDriver(driver, vm);
                goto cleanup;
            }
            if (virCommandRunAsync(cmd, NULL) < 0) {
                qemuDomainObjExitMonitorWithDriver(driver, vm);
                goto cleanup;
            }
            rc = qemuMonitorMigrateToFd(priv->mon,
                                        QEMU_MONITOR_MIGRATE_BACKGROUND,
                                        pipeFD[1]);
            if (VIR_CLOSE(pipeFD[0]) < 0 ||
                VIR_CLOSE(pipeFD[1]) < 0)
                VIR_WARN("failed to close intermediate pipe");
        } else {
            rc = qemuMonitorMigrateToFile(priv->mon,
                                          QEMU_MONITOR_MIGRATE_BACKGROUND,
                                          args, path, offset);
        }
    }
    qemuDomainObjExitMonitorWithDriver(driver, vm);

    if (rc < 0)
        goto cleanup;

    rc = qemuMigrationWaitForCompletion(driver, vm);

    if (rc < 0)
        goto cleanup;

    if (cmd && virCommandWait(cmd, NULL) < 0)
        goto cleanup;

    ret = 0;

cleanup:
    VIR_FORCE_CLOSE(pipeFD[0]);
    VIR_FORCE_CLOSE(pipeFD[1]);
    virCommandFree(cmd);
    if (restoreLabel && (!bypassSecurityDriver) &&
        virSecurityManagerRestoreSavedStateLabel(driver->securityManager,
                                                 vm, path) < 0)
        VIR_WARN("failed to restore save state label on %s", path);

    if (cgroup != NULL) {
        rc = virCgroupDenyDevicePath(cgroup, path,
                                     VIR_CGROUP_DEVICE_RWM);
        qemuAuditCgroupPath(vm, cgroup, "deny", path, "rwm", rc);
        if (rc < 0)
            VIR_WARN("Unable to deny device %s for %s %d",
                     path, vm->def->name, rc);
        virCgroupFree(&cgroup);
    }

    return ret;
}