/*
 * qemu_migration.c: QEMU migration handling
 *
 * Copyright (C) 2006-2014 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 */

#include <config.h>

#include <netdb.h>
#include <sys/socket.h>
#include <sys/time.h>
#ifdef WITH_GNUTLS
# include <gnutls/gnutls.h>
# include <gnutls/x509.h>
#endif
#include <fcntl.h>
#include <poll.h>

#include "qemu_migration.h"
#include "qemu_monitor.h"
#include "qemu_domain.h"
#include "qemu_process.h"
#include "qemu_capabilities.h"
#include "qemu_command.h"
#include "qemu_cgroup.h"
#include "qemu_hotplug.h"

#include "domain_audit.h"
#include "virlog.h"
#include "virerror.h"
#include "viralloc.h"
#include "virfile.h"
#include "datatypes.h"
#include "fdstream.h"
#include "viruuid.h"
#include "virtime.h"
#include "locking/domain_lock.h"
#include "rpc/virnetsocket.h"
#include "virstoragefile.h"
#include "viruri.h"
#include "virhook.h"
#include "virstring.h"
#include "virtypedparam.h"

#define VIR_FROM_THIS VIR_FROM_QEMU

VIR_LOG_INIT("qemu.qemu_migration");

VIR_ENUM_IMPL(qemuMigrationJobPhase, QEMU_MIGRATION_PHASE_LAST,
              "none",
              "perform2",
              "begin3",
              "perform3",
              "perform3_done",
              "confirm3_cancelled",
              "confirm3",
              "prepare",
              "finish2",
              "finish3",
);

enum qemuMigrationCookieFlags {
    QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS,
    QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,
    QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT,
    QEMU_MIGRATION_COOKIE_FLAG_NETWORK,
    QEMU_MIGRATION_COOKIE_FLAG_NBD,
    QEMU_MIGRATION_COOKIE_FLAG_STATS,

    QEMU_MIGRATION_COOKIE_FLAG_LAST
};

VIR_ENUM_DECL(qemuMigrationCookieFlag);
VIR_ENUM_IMPL(qemuMigrationCookieFlag,
              QEMU_MIGRATION_COOKIE_FLAG_LAST,
              "graphics",
              "lockstate",
              "persistent",
              "network",
              "nbd",
              "statistics");

enum qemuMigrationCookieFeatures {
    QEMU_MIGRATION_COOKIE_GRAPHICS  = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
    QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE),
    QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT),
    QEMU_MIGRATION_COOKIE_NETWORK = (1 << QEMU_MIGRATION_COOKIE_FLAG_NETWORK),
    QEMU_MIGRATION_COOKIE_NBD = (1 << QEMU_MIGRATION_COOKIE_FLAG_NBD),
    QEMU_MIGRATION_COOKIE_STATS = (1 << QEMU_MIGRATION_COOKIE_FLAG_STATS),
};
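
/*
 * Illustrative note: each feature bit above is the corresponding cookie
 * flag shifted into a mask, e.g.
 *
 *     QEMU_MIGRATION_COOKIE_NBD == (1 << QEMU_MIGRATION_COOKIE_FLAG_NBD)
 *
 * which lets callers OR several features into the single 'flags'
 * argument accepted by the cookie helpers below, e.g.
 *
 *     unsigned int cookieFlags = QEMU_MIGRATION_COOKIE_GRAPHICS |
 *                                QEMU_MIGRATION_COOKIE_NBD;
 */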

typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
typedef qemuMigrationCookieGraphics *qemuMigrationCookieGraphicsPtr;
struct _qemuMigrationCookieGraphics {
    int type;
    int port;
    int tlsPort;
    char *listen;
    char *tlsSubject;
};

typedef struct _qemuMigrationCookieNetData qemuMigrationCookieNetData;
typedef qemuMigrationCookieNetData *qemuMigrationCookieNetDataPtr;
struct _qemuMigrationCookieNetData {
    int vporttype; /* enum virNetDevVPortProfile */

    /*
     * Array of pointers to saved data. Each VIF will have its own
     * data to transfer.
     */
    char *portdata;
};

typedef struct _qemuMigrationCookieNetwork qemuMigrationCookieNetwork;
typedef qemuMigrationCookieNetwork *qemuMigrationCookieNetworkPtr;
struct _qemuMigrationCookieNetwork {
    /* How many virtual NICs are we saving data for? */
    int nnets;

    qemuMigrationCookieNetDataPtr net;
};

typedef struct _qemuMigrationCookieNBD qemuMigrationCookieNBD;
typedef qemuMigrationCookieNBD *qemuMigrationCookieNBDPtr;
struct _qemuMigrationCookieNBD {
    int port; /* port on which the NBD server listens for incoming data */
};

typedef struct _qemuMigrationCookie qemuMigrationCookie;
typedef qemuMigrationCookie *qemuMigrationCookiePtr;
struct _qemuMigrationCookie {
    unsigned int flags;
    unsigned int flagsMandatory;

    /* Host properties */
    unsigned char localHostuuid[VIR_UUID_BUFLEN];
    unsigned char remoteHostuuid[VIR_UUID_BUFLEN];
    char *localHostname;
    char *remoteHostname;

    /* Guest properties */
    unsigned char uuid[VIR_UUID_BUFLEN];
    char *name;

    /* If (flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) */
    char *lockState;
    char *lockDriver;

    /* If (flags & QEMU_MIGRATION_COOKIE_GRAPHICS) */
    qemuMigrationCookieGraphicsPtr graphics;

    /* If (flags & QEMU_MIGRATION_COOKIE_PERSISTENT) */
    virDomainDefPtr persistent;

    /* If (flags & QEMU_MIGRATION_COOKIE_NETWORK) */
    qemuMigrationCookieNetworkPtr network;

    /* If (flags & QEMU_MIGRATION_COOKIE_NBD) */
    qemuMigrationCookieNBDPtr nbd;

    /* If (flags & QEMU_MIGRATION_COOKIE_STATS) */
    qemuDomainJobInfoPtr jobInfo;
};

static void qemuMigrationCookieGraphicsFree(qemuMigrationCookieGraphicsPtr grap)
{
    if (!grap)
        return;
    VIR_FREE(grap->listen);
    VIR_FREE(grap->tlsSubject);
    VIR_FREE(grap);
}


static void
qemuMigrationCookieNetworkFree(qemuMigrationCookieNetworkPtr network)
{
    size_t i;

    if (!network)
        return;

    if (network->net) {
        for (i = 0; i < network->nnets; i++)
            VIR_FREE(network->net[i].portdata);
    }
    VIR_FREE(network->net);
    VIR_FREE(network);
}


static void qemuMigrationCookieFree(qemuMigrationCookiePtr mig)
{
    if (!mig)
        return;

    qemuMigrationCookieGraphicsFree(mig->graphics);
    qemuMigrationCookieNetworkFree(mig->network);

    VIR_FREE(mig->localHostname);
    VIR_FREE(mig->remoteHostname);
    VIR_FREE(mig->name);
    VIR_FREE(mig->lockState);
    VIR_FREE(mig->lockDriver);
    VIR_FREE(mig->nbd);
    VIR_FREE(mig);
}


#ifdef WITH_GNUTLS
static char *
qemuDomainExtractTLSSubject(const char *certdir)
{
    char *certfile = NULL;
    char *subject = NULL;
    char *pemdata = NULL;
    gnutls_datum_t pemdatum;
    gnutls_x509_crt_t cert;
    int ret;
    size_t subjectlen;

    if (virAsprintf(&certfile, "%s/server-cert.pem", certdir) < 0)
        goto error;

    if (virFileReadAll(certfile, 8192, &pemdata) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("unable to read server cert %s"), certfile);
        goto error;
    }

    ret = gnutls_x509_crt_init(&cert);
    if (ret < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("cannot initialize cert object: %s"),
                       gnutls_strerror(ret));
        goto error;
    }

    pemdatum.data = (unsigned char *)pemdata;
    pemdatum.size = strlen(pemdata);

    ret = gnutls_x509_crt_import(cert, &pemdatum, GNUTLS_X509_FMT_PEM);
    if (ret < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("cannot load cert data from %s: %s"),
                       certfile, gnutls_strerror(ret));
        goto error;
    }

    subjectlen = 1024;
    if (VIR_ALLOC_N(subject, subjectlen+1) < 0)
        goto error;

    gnutls_x509_crt_get_dn(cert, subject, &subjectlen);
    subject[subjectlen] = '\0';

    VIR_FREE(certfile);
    VIR_FREE(pemdata);

    return subject;

 error:
    VIR_FREE(certfile);
    VIR_FREE(pemdata);
    return NULL;
}
#endif

static qemuMigrationCookieGraphicsPtr
qemuMigrationCookieGraphicsAlloc(virQEMUDriverPtr driver,
                                 virDomainGraphicsDefPtr def)
{
    qemuMigrationCookieGraphicsPtr mig = NULL;
    const char *listenAddr;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

    if (VIR_ALLOC(mig) < 0)
        goto error;

    mig->type = def->type;
    if (mig->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC) {
        mig->port = def->data.vnc.port;
        listenAddr = virDomainGraphicsListenGetAddress(def, 0);
        if (!listenAddr)
            listenAddr = cfg->vncListen;

#ifdef WITH_GNUTLS
        if (cfg->vncTLS &&
            !(mig->tlsSubject = qemuDomainExtractTLSSubject(cfg->vncTLSx509certdir)))
            goto error;
#endif
    } else {
        mig->port = def->data.spice.port;
        if (cfg->spiceTLS)
            mig->tlsPort = def->data.spice.tlsPort;
        else
            mig->tlsPort = -1;
        listenAddr = virDomainGraphicsListenGetAddress(def, 0);
        if (!listenAddr)
            listenAddr = cfg->spiceListen;

#ifdef WITH_GNUTLS
        if (cfg->spiceTLS &&
            !(mig->tlsSubject = qemuDomainExtractTLSSubject(cfg->spiceTLSx509certdir)))
            goto error;
#endif
    }
    if (VIR_STRDUP(mig->listen, listenAddr) < 0)
        goto error;

    virObjectUnref(cfg);
    return mig;

 error:
    qemuMigrationCookieGraphicsFree(mig);
    virObjectUnref(cfg);
    return NULL;
}


static qemuMigrationCookieNetworkPtr
qemuMigrationCookieNetworkAlloc(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
                                virDomainDefPtr def)
{
    qemuMigrationCookieNetworkPtr mig;
    size_t i;

    if (VIR_ALLOC(mig) < 0)
        goto error;

    mig->nnets = def->nnets;

    if (VIR_ALLOC_N(mig->net, def->nnets) < 0)
        goto error;

    for (i = 0; i < def->nnets; i++) {
        virDomainNetDefPtr netptr;
        virNetDevVPortProfilePtr vport;

        netptr = def->nets[i];
        vport = virDomainNetGetActualVirtPortProfile(netptr);

        if (vport) {
            mig->net[i].vporttype = vport->virtPortType;

            switch (vport->virtPortType) {
            case VIR_NETDEV_VPORT_PROFILE_NONE:
            case VIR_NETDEV_VPORT_PROFILE_8021QBG:
            case VIR_NETDEV_VPORT_PROFILE_8021QBH:
                break;
            case VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH:
                if (virNetDevOpenvswitchGetMigrateData(&mig->net[i].portdata,
                                                       netptr->ifname) != 0) {
                        virReportSystemError(VIR_ERR_INTERNAL_ERROR,
                                             _("Unable to run command to get OVS port data for "
                                             "interface %s"), netptr->ifname);
                        goto error;
                }
                break;
            default:
                break;
            }
        }
    }
    return mig;

 error:
    qemuMigrationCookieNetworkFree(mig);
    return NULL;
}

static qemuMigrationCookiePtr
qemuMigrationCookieNew(virDomainObjPtr dom)
{
    qemuDomainObjPrivatePtr priv = dom->privateData;
    qemuMigrationCookiePtr mig = NULL;
    const char *name;

    if (VIR_ALLOC(mig) < 0)
        goto error;

    if (priv->origname)
        name = priv->origname;
    else
        name = dom->def->name;
    if (VIR_STRDUP(mig->name, name) < 0)
        goto error;
    memcpy(mig->uuid, dom->def->uuid, VIR_UUID_BUFLEN);

    if (!(mig->localHostname = virGetHostname()))
        goto error;
    if (virGetHostUUID(mig->localHostuuid) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Unable to obtain host UUID"));
        goto error;
    }

    return mig;

 error:
    qemuMigrationCookieFree(mig);
    return NULL;
}


static int
qemuMigrationCookieAddGraphics(qemuMigrationCookiePtr mig,
                               virQEMUDriverPtr driver,
                               virDomainObjPtr dom)
{
    size_t i = 0;

    if (mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Migration graphics data already present"));
        return -1;
    }

    for (i = 0; i < dom->def->ngraphics; i++) {
       if (dom->def->graphics[i]->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
           if (!(mig->graphics =
                 qemuMigrationCookieGraphicsAlloc(driver, dom->def->graphics[i])))
               return -1;
           mig->flags |= QEMU_MIGRATION_COOKIE_GRAPHICS;
           break;
       }
    }

    return 0;
}


static int
qemuMigrationCookieAddLockstate(qemuMigrationCookiePtr mig,
                                virQEMUDriverPtr driver,
                                virDomainObjPtr dom)
{
    qemuDomainObjPrivatePtr priv = dom->privateData;

    if (mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Migration lockstate data already present"));
        return -1;
    }

    if (virDomainObjGetState(dom, NULL) == VIR_DOMAIN_PAUSED) {
        if (VIR_STRDUP(mig->lockState, priv->lockState) < 0)
            return -1;
    } else {
        if (virDomainLockProcessInquire(driver->lockManager, dom, &mig->lockState) < 0)
            return -1;
    }

    if (VIR_STRDUP(mig->lockDriver, virLockManagerPluginGetName(driver->lockManager)) < 0) {
        VIR_FREE(mig->lockState);
        return -1;
    }

    mig->flags |= QEMU_MIGRATION_COOKIE_LOCKSTATE;
    mig->flagsMandatory |= QEMU_MIGRATION_COOKIE_LOCKSTATE;

    return 0;
}


static int
qemuMigrationCookieAddPersistent(qemuMigrationCookiePtr mig,
                                 virDomainObjPtr dom)
{
    if (mig->flags & QEMU_MIGRATION_COOKIE_PERSISTENT) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Migration persistent data already present"));
        return -1;
    }

    if (!dom->newDef)
        return 0;

    mig->persistent = dom->newDef;
    mig->flags |= QEMU_MIGRATION_COOKIE_PERSISTENT;
    mig->flagsMandatory |= QEMU_MIGRATION_COOKIE_PERSISTENT;
    return 0;
}


static int
qemuMigrationCookieAddNetwork(qemuMigrationCookiePtr mig,
                              virQEMUDriverPtr driver,
                              virDomainObjPtr dom)
{
    if (mig->flags & QEMU_MIGRATION_COOKIE_NETWORK) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Network migration data already present"));
        return -1;
    }

    if (dom->def->nnets > 0) {
        mig->network = qemuMigrationCookieNetworkAlloc(driver, dom->def);
        if (!mig->network)
            return -1;
        mig->flags |= QEMU_MIGRATION_COOKIE_NETWORK;
    }

    return 0;
}


static int
qemuMigrationCookieAddNBD(qemuMigrationCookiePtr mig,
                          virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
                          virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    /* It is not a bug if there already is NBD data */
    if (!mig->nbd &&
        VIR_ALLOC(mig->nbd) < 0)
        return -1;

    mig->nbd->port = priv->nbdPort;
    mig->flags |= QEMU_MIGRATION_COOKIE_NBD;

    return 0;
}


static int
qemuMigrationCookieAddStatistics(qemuMigrationCookiePtr mig,
                                 virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (!priv->job.completed)
        return 0;

    if (!mig->jobInfo && VIR_ALLOC(mig->jobInfo) < 0)
        return -1;

    *mig->jobInfo = *priv->job.completed;
    mig->flags |= QEMU_MIGRATION_COOKIE_STATS;

    return 0;
}


static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf,
                                                 qemuMigrationCookieGraphicsPtr grap)
{
    virBufferAsprintf(buf, "<graphics type='%s' port='%d' listen='%s'",
                      virDomainGraphicsTypeToString(grap->type),
                      grap->port, grap->listen);
    if (grap->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE)
        virBufferAsprintf(buf, " tlsPort='%d'", grap->tlsPort);
    if (grap->tlsSubject) {
        virBufferAddLit(buf, ">\n");
        virBufferAdjustIndent(buf, 2);
        virBufferEscapeString(buf, "<cert info='subject' value='%s'/>\n", grap->tlsSubject);
        virBufferAdjustIndent(buf, -2);
        virBufferAddLit(buf, "</graphics>\n");
    } else {
        virBufferAddLit(buf, "/>\n");
    }
}


static void
qemuMigrationCookieNetworkXMLFormat(virBufferPtr buf,
                                    qemuMigrationCookieNetworkPtr optr)
{
    size_t i;
    bool empty = true;

    for (i = 0; i < optr->nnets; i++) {
        /* If optr->net[i].vporttype is not set, there is nothing to transfer */
        if (optr->net[i].vporttype != VIR_NETDEV_VPORT_PROFILE_NONE) {
            if (empty) {
                virBufferAddLit(buf, "<network>\n");
                virBufferAdjustIndent(buf, 2);
                empty = false;
            }
            virBufferAsprintf(buf, "<interface index='%zu' vporttype='%s'",
                              i, virNetDevVPortTypeToString(optr->net[i].vporttype));
            if (optr->net[i].portdata) {
                virBufferAddLit(buf, ">\n");
                virBufferAdjustIndent(buf, 2);
                virBufferEscapeString(buf, "<portdata>%s</portdata>\n",
                                      optr->net[i].portdata);
                virBufferAdjustIndent(buf, -2);
                virBufferAddLit(buf, "</interface>\n");
            } else {
                virBufferAddLit(buf, "/>\n");
            }
        }
    }
    if (!empty) {
        virBufferAdjustIndent(buf, -2);
        virBufferAddLit(buf, "</network>\n");
    }
}
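
/*
 * Illustrative note: for a guest with one Open vSwitch vport, the
 * formatter above emits something along the lines of (values made up):
 *
 *   <network>
 *     <interface index='0' vporttype='openvswitch'>
 *       <portdata>...</portdata>
 *     </interface>
 *   </network>
 */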


static void
qemuMigrationCookieStatisticsXMLFormat(virBufferPtr buf,
                                       qemuDomainJobInfoPtr jobInfo)
{
    qemuMonitorMigrationStatus *status = &jobInfo->status;

    virBufferAddLit(buf, "<statistics>\n");
    virBufferAdjustIndent(buf, 2);

    virBufferAsprintf(buf, "<started>%llu</started>\n", jobInfo->started);
    virBufferAsprintf(buf, "<stopped>%llu</stopped>\n", jobInfo->stopped);

    virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                      VIR_DOMAIN_JOB_TIME_ELAPSED,
                      jobInfo->timeElapsed);
    virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                      VIR_DOMAIN_JOB_TIME_REMAINING,
                      jobInfo->timeRemaining);
    if (status->downtime_set)
        virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                          VIR_DOMAIN_JOB_DOWNTIME,
                          status->downtime);

    virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                      VIR_DOMAIN_JOB_MEMORY_TOTAL,
                      status->ram_total);
    virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                      VIR_DOMAIN_JOB_MEMORY_PROCESSED,
                      status->ram_transferred);
    virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                      VIR_DOMAIN_JOB_MEMORY_REMAINING,
                      status->ram_remaining);

    if (status->ram_duplicate_set) {
        virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                          VIR_DOMAIN_JOB_MEMORY_CONSTANT,
                          status->ram_duplicate);
        virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                          VIR_DOMAIN_JOB_MEMORY_NORMAL,
                          status->ram_normal);
        virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                          VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES,
                          status->ram_normal_bytes);
    }

    virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                      VIR_DOMAIN_JOB_DISK_TOTAL,
                      status->disk_total);
    virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                      VIR_DOMAIN_JOB_DISK_PROCESSED,
                      status->disk_transferred);
    virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                      VIR_DOMAIN_JOB_DISK_REMAINING,
                      status->disk_remaining);

    if (status->xbzrle_set) {
        virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                          VIR_DOMAIN_JOB_COMPRESSION_CACHE,
                          status->xbzrle_cache_size);
        virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                          VIR_DOMAIN_JOB_COMPRESSION_BYTES,
                          status->xbzrle_bytes);
        virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                          VIR_DOMAIN_JOB_COMPRESSION_PAGES,
                          status->xbzrle_pages);
        virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                          VIR_DOMAIN_JOB_COMPRESSION_CACHE_MISSES,
                          status->xbzrle_cache_miss);
        virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
                          VIR_DOMAIN_JOB_COMPRESSION_OVERFLOW,
                          status->xbzrle_overflow);
    }

    virBufferAdjustIndent(buf, -2);
    virBufferAddLit(buf, "</statistics>\n");
}


static int
qemuMigrationCookieXMLFormat(virQEMUDriverPtr driver,
                             virBufferPtr buf,
                             qemuMigrationCookiePtr mig)
{
    char uuidstr[VIR_UUID_STRING_BUFLEN];
    char hostuuidstr[VIR_UUID_STRING_BUFLEN];
    size_t i;

    virUUIDFormat(mig->uuid, uuidstr);
    virUUIDFormat(mig->localHostuuid, hostuuidstr);

    virBufferAddLit(buf, "<qemu-migration>\n");
    virBufferAdjustIndent(buf, 2);
    virBufferEscapeString(buf, "<name>%s</name>\n", mig->name);
    virBufferAsprintf(buf, "<uuid>%s</uuid>\n", uuidstr);
    virBufferEscapeString(buf, "<hostname>%s</hostname>\n", mig->localHostname);
    virBufferAsprintf(buf, "<hostuuid>%s</hostuuid>\n", hostuuidstr);

    for (i = 0; i < QEMU_MIGRATION_COOKIE_FLAG_LAST; i++) {
        if (mig->flagsMandatory & (1 << i))
            virBufferAsprintf(buf, "<feature name='%s'/>\n",
                              qemuMigrationCookieFlagTypeToString(i));
    }

    if ((mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS) &&
        mig->graphics)
        qemuMigrationCookieGraphicsXMLFormat(buf, mig->graphics);

    if ((mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) &&
        mig->lockState) {
        virBufferAsprintf(buf, "<lockstate driver='%s'>\n",
                          mig->lockDriver);
        virBufferAdjustIndent(buf, 2);
        virBufferAsprintf(buf, "<leases>%s</leases>\n",
                          mig->lockState);
        virBufferAdjustIndent(buf, -2);
        virBufferAddLit(buf, "</lockstate>\n");
    }

    if ((mig->flags & QEMU_MIGRATION_COOKIE_PERSISTENT) &&
        mig->persistent) {
        if (qemuDomainDefFormatBuf(driver,
                                   mig->persistent,
                                   VIR_DOMAIN_XML_INACTIVE |
                                   VIR_DOMAIN_XML_SECURE |
                                   VIR_DOMAIN_XML_MIGRATABLE,
                                   buf) < 0)
            return -1;
    }

    if ((mig->flags & QEMU_MIGRATION_COOKIE_NETWORK) && mig->network)
        qemuMigrationCookieNetworkXMLFormat(buf, mig->network);

    if ((mig->flags & QEMU_MIGRATION_COOKIE_NBD) && mig->nbd) {
        virBufferAddLit(buf, "<nbd");
        if (mig->nbd->port)
            virBufferAsprintf(buf, " port='%d'", mig->nbd->port);
        virBufferAddLit(buf, "/>\n");
    }

    if (mig->flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobInfo)
        qemuMigrationCookieStatisticsXMLFormat(buf, mig->jobInfo);

    virBufferAdjustIndent(buf, -2);
    virBufferAddLit(buf, "</qemu-migration>\n");
    return 0;
}


static char *qemuMigrationCookieXMLFormatStr(virQEMUDriverPtr driver,
                                             qemuMigrationCookiePtr mig)
{
    virBuffer buf = VIR_BUFFER_INITIALIZER;

    if (qemuMigrationCookieXMLFormat(driver, &buf, mig) < 0) {
        virBufferFreeAndReset(&buf);
        return NULL;
    }

    if (virBufferCheckError(&buf) < 0)
        return NULL;

    return virBufferContentAndReset(&buf);
}
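
/*
 * Illustrative note: the formatters above produce a cookie document
 * roughly shaped like the following (all values made up):
 *
 *   <qemu-migration>
 *     <name>guest1</name>
 *     <uuid>00010203-0405-0607-0809-0a0b0c0d0e0f</uuid>
 *     <hostname>src.example.com</hostname>
 *     <hostuuid>10111213-1415-1617-1819-1a1b1c1d1e1f</hostuuid>
 *     <feature name='lockstate'/>
 *     <graphics type='spice' port='5901' listen='0.0.0.0' tlsPort='-1'/>
 *     <nbd port='49153'/>
 *     <statistics>...</statistics>
 *   </qemu-migration>
 */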


static qemuMigrationCookieGraphicsPtr
qemuMigrationCookieGraphicsXMLParse(xmlXPathContextPtr ctxt)
{
    qemuMigrationCookieGraphicsPtr grap;
    char *tmp;

    if (VIR_ALLOC(grap) < 0)
        goto error;

    if (!(tmp = virXPathString("string(./graphics/@type)", ctxt))) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("missing type attribute in migration data"));
        goto error;
    }
    if ((grap->type = virDomainGraphicsTypeFromString(tmp)) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("unknown graphics type %s"), tmp);
        VIR_FREE(tmp);
        goto error;
    }
    VIR_FREE(tmp);
    if (virXPathInt("string(./graphics/@port)", ctxt, &grap->port) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("missing port attribute in migration data"));
        goto error;
    }
    if (grap->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
        if (virXPathInt("string(./graphics/@tlsPort)", ctxt, &grap->tlsPort) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("missing tlsPort attribute in migration data"));
            goto error;
        }
    }
    if (!(grap->listen = virXPathString("string(./graphics/@listen)", ctxt))) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("missing listen attribute in migration data"));
        goto error;
    }
    /* Optional */
    grap->tlsSubject = virXPathString("string(./graphics/cert[@info='subject']/@value)", ctxt);

    return grap;

 error:
    qemuMigrationCookieGraphicsFree(grap);
    return NULL;
}


static qemuMigrationCookieNetworkPtr
qemuMigrationCookieNetworkXMLParse(xmlXPathContextPtr ctxt)
{
    qemuMigrationCookieNetworkPtr optr;
    size_t i;
    int n;
    xmlNodePtr *interfaces = NULL;
    char *vporttype;
    xmlNodePtr save_ctxt = ctxt->node;

    if (VIR_ALLOC(optr) < 0)
842
        goto error;
843 844 845 846 847 848 849 850

    if ((n = virXPathNodeSet("./network/interface", ctxt, &interfaces)) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("missing interface information"));
        goto error;
    }

    optr->nnets = n;
    if (VIR_ALLOC_N(optr->net, optr->nnets) < 0)
        goto error;

    for (i = 0; i < n; i++) {
        /* portdata is optional, and may not exist */
        ctxt->node = interfaces[i];
        optr->net[i].portdata = virXPathString("string(./portdata[1])", ctxt);

        if (!(vporttype = virXMLPropString(interfaces[i], "vporttype"))) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("missing vporttype attribute in migration data"));
            goto error;
        }
        optr->net[i].vporttype = virNetDevVPortTypeFromString(vporttype);
    }

    VIR_FREE(interfaces);

 cleanup:
    ctxt->node = save_ctxt;
    return optr;

 error:
    VIR_FREE(interfaces);
    qemuMigrationCookieNetworkFree(optr);
    optr = NULL;
    goto cleanup;
}


static qemuDomainJobInfoPtr
qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt)
{
    qemuDomainJobInfoPtr jobInfo = NULL;
    qemuMonitorMigrationStatus *status;
    xmlNodePtr save_ctxt = ctxt->node;

    if (!(ctxt->node = virXPathNode("./statistics", ctxt)))
        goto cleanup;

    if (VIR_ALLOC(jobInfo) < 0)
        goto cleanup;

    status = &jobInfo->status;
    jobInfo->type = VIR_DOMAIN_JOB_COMPLETED;

    virXPathULongLong("string(./started[1])", ctxt, &jobInfo->started);
    virXPathULongLong("string(./stopped[1])", ctxt, &jobInfo->stopped);

    virXPathULongLong("string(./" VIR_DOMAIN_JOB_TIME_ELAPSED "[1])",
                      ctxt, &jobInfo->timeElapsed);
    virXPathULongLong("string(./" VIR_DOMAIN_JOB_TIME_REMAINING "[1])",
                      ctxt, &jobInfo->timeRemaining);
    if (virXPathULongLong("string(./" VIR_DOMAIN_JOB_DOWNTIME "[1])",
                          ctxt, &status->downtime) == 0)
        status->downtime_set = true;

    virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_TOTAL "[1])",
                      ctxt, &status->ram_total);
    virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_PROCESSED "[1])",
                      ctxt, &status->ram_transferred);
    virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_REMAINING "[1])",
                      ctxt, &status->ram_remaining);

    if (virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_CONSTANT "[1])",
                          ctxt, &status->ram_duplicate) == 0)
        status->ram_duplicate_set = true;
    virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_NORMAL "[1])",
                      ctxt, &status->ram_normal);
    virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES "[1])",
                      ctxt, &status->ram_normal_bytes);

    virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_TOTAL "[1])",
                      ctxt, &status->disk_total);
    virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_PROCESSED "[1])",
                      ctxt, &status->disk_transferred);
    virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_REMAINING "[1])",
                      ctxt, &status->disk_remaining);

    if (virXPathULongLong("string(./" VIR_DOMAIN_JOB_COMPRESSION_CACHE "[1])",
                          ctxt, &status->xbzrle_cache_size) == 0)
        status->xbzrle_set = true;
    virXPathULongLong("string(./" VIR_DOMAIN_JOB_COMPRESSION_BYTES "[1])",
                      ctxt, &status->xbzrle_bytes);
    virXPathULongLong("string(./" VIR_DOMAIN_JOB_COMPRESSION_PAGES "[1])",
                      ctxt, &status->xbzrle_pages);
    virXPathULongLong("string(./" VIR_DOMAIN_JOB_COMPRESSION_CACHE_MISSES "[1])",
                      ctxt, &status->xbzrle_cache_miss);
    virXPathULongLong("string(./" VIR_DOMAIN_JOB_COMPRESSION_OVERFLOW "[1])",
                      ctxt, &status->xbzrle_overflow);

 cleanup:
    ctxt->node = save_ctxt;
    return jobInfo;
}


static int
qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
                            virQEMUDriverPtr driver,
                            xmlDocPtr doc,
                            xmlXPathContextPtr ctxt,
                            unsigned int flags)
{
    char uuidstr[VIR_UUID_STRING_BUFLEN];
    char *tmp = NULL;
    xmlNodePtr *nodes = NULL;
    size_t i;
    int n;
    virCapsPtr caps = NULL;

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto error;

    /* We don't store the uuid, name, hostname, or hostuuid
     * values. We just compare them to local data to do some
     * sanity checking on the migration operation
     */

    /* Extract domain name */
    if (!(tmp = virXPathString("string(./name[1])", ctxt))) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("missing name element in migration data"));
        goto error;
    }
    if (STRNEQ(tmp, mig->name)) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Incoming cookie data had unexpected name %s vs %s"),
                       tmp, mig->name);
        goto error;
    }
    VIR_FREE(tmp);

    /* Extract domain uuid */
    tmp = virXPathString("string(./uuid[1])", ctxt);
    if (!tmp) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("missing uuid element in migration data"));
        goto error;
    }
    virUUIDFormat(mig->uuid, uuidstr);
    if (STRNEQ(tmp, uuidstr)) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Incoming cookie data had unexpected UUID %s vs %s"),
                       tmp, uuidstr);
    }
    VIR_FREE(tmp);

    /* Check & forbid "localhost" migration */
    if (!(mig->remoteHostname = virXPathString("string(./hostname[1])", ctxt))) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("missing hostname element in migration data"));
        goto error;
    }
    if (STREQ(mig->remoteHostname, mig->localHostname)) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Attempt to migrate guest to the same host %s"),
                       mig->remoteHostname);
        goto error;
    }

    if (!(tmp = virXPathString("string(./hostuuid[1])", ctxt))) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("missing hostuuid element in migration data"));
        goto error;
    }
    if (virUUIDParse(tmp, mig->remoteHostuuid) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("malformed hostuuid element in migration data"));
        goto error;
    }
    if (memcmp(mig->remoteHostuuid, mig->localHostuuid, VIR_UUID_BUFLEN) == 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Attempt to migrate guest to the same host %s"),
                       tmp);
        goto error;
    }
    VIR_FREE(tmp);

    /* Check to ensure all mandatory features from XML are also
     * present in 'flags' */
    if ((n = virXPathNodeSet("./feature", ctxt, &nodes)) < 0)
        goto error;

    for (i = 0; i < n; i++) {
        int val;
        char *str = virXMLPropString(nodes[i], "name");
        if (!str) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("missing feature name"));
            goto error;
        }

        if ((val = qemuMigrationCookieFlagTypeFromString(str)) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Unknown migration cookie feature %s"),
                           str);
            VIR_FREE(str);
            goto error;
        }

        if ((flags & (1 << val)) == 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Unsupported migration cookie feature %s"),
                           str);
            VIR_FREE(str);
        }
        VIR_FREE(str);
    }
    VIR_FREE(nodes);

    if ((flags & QEMU_MIGRATION_COOKIE_GRAPHICS) &&
        virXPathBoolean("count(./graphics) > 0", ctxt) &&
        (!(mig->graphics = qemuMigrationCookieGraphicsXMLParse(ctxt))))
        goto error;

    if ((flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) &&
        virXPathBoolean("count(./lockstate) > 0", ctxt)) {
        mig->lockDriver = virXPathString("string(./lockstate[1]/@driver)", ctxt);
        if (!mig->lockDriver) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("Missing lock driver name in migration cookie"));
            goto error;
        }
        mig->lockState = virXPathString("string(./lockstate[1]/leases[1])", ctxt);
        if (mig->lockState && STREQ(mig->lockState, ""))
            VIR_FREE(mig->lockState);
    }

    if ((flags & QEMU_MIGRATION_COOKIE_PERSISTENT) &&
        virXPathBoolean("count(./domain) > 0", ctxt)) {
        if ((n = virXPathNodeSet("./domain", ctxt, &nodes)) > 1) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Too many domain elements in "
                             "migration cookie: %d"),
                           n);
            goto error;
        }
        mig->persistent = virDomainDefParseNode(doc, nodes[0],
                                                caps, driver->xmlopt,
                                                -1, VIR_DOMAIN_XML_INACTIVE);
        if (!mig->persistent) {
            /* virDomainDefParseNode already reported
             * an error for us */
            goto error;
        }
        VIR_FREE(nodes);
    }

    if ((flags & QEMU_MIGRATION_COOKIE_NETWORK) &&
        virXPathBoolean("count(./network) > 0", ctxt) &&
        (!(mig->network = qemuMigrationCookieNetworkXMLParse(ctxt))))
        goto error;

    if (flags & QEMU_MIGRATION_COOKIE_NBD &&
        virXPathBoolean("boolean(./nbd)", ctxt)) {
        char *port;

        if (VIR_ALLOC(mig->nbd) < 0)
            goto error;

        port = virXPathString("string(./nbd/@port)", ctxt);
        if (port && virStrToLong_i(port, NULL, 10, &mig->nbd->port) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Malformed nbd port '%s'"),
                           port);
            VIR_FREE(port);
            goto error;
        }
        VIR_FREE(port);
    }

    if (flags & QEMU_MIGRATION_COOKIE_STATS &&
        virXPathBoolean("boolean(./statistics)", ctxt) &&
        (!(mig->jobInfo = qemuMigrationCookieStatisticsXMLParse(ctxt))))
        goto error;

    virObjectUnref(caps);
    return 0;

 error:
    VIR_FREE(tmp);
    VIR_FREE(nodes);
    virObjectUnref(caps);
    return -1;
}


static int
qemuMigrationCookieXMLParseStr(qemuMigrationCookiePtr mig,
                               virQEMUDriverPtr driver,
                               const char *xml,
                               unsigned int flags)
{
    xmlDocPtr doc = NULL;
    xmlXPathContextPtr ctxt = NULL;
    int ret = -1;

    VIR_DEBUG("xml=%s", NULLSTR(xml));

    if (!(doc = virXMLParseStringCtxt(xml, _("(qemu_migration_cookie)"), &ctxt)))
        goto cleanup;

    ret = qemuMigrationCookieXMLParse(mig, driver, doc, ctxt, flags);

 cleanup:
    xmlXPathFreeContext(ctxt);
    xmlFreeDoc(doc);

    return ret;
}


static int
qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
                        virQEMUDriverPtr driver,
                        virDomainObjPtr dom,
                        char **cookieout,
                        int *cookieoutlen,
                        unsigned int flags)
{
    if (!cookieout || !cookieoutlen)
        return 0;

    *cookieoutlen = 0;

    if (flags & QEMU_MIGRATION_COOKIE_GRAPHICS &&
        qemuMigrationCookieAddGraphics(mig, driver, dom) < 0)
        return -1;

    if (flags & QEMU_MIGRATION_COOKIE_LOCKSTATE &&
        qemuMigrationCookieAddLockstate(mig, driver, dom) < 0)
        return -1;

    if (flags & QEMU_MIGRATION_COOKIE_PERSISTENT &&
        qemuMigrationCookieAddPersistent(mig, dom) < 0)
        return -1;

    if (flags & QEMU_MIGRATION_COOKIE_NETWORK &&
        qemuMigrationCookieAddNetwork(mig, driver, dom) < 0) {
        return -1;
    }

    if ((flags & QEMU_MIGRATION_COOKIE_NBD) &&
        qemuMigrationCookieAddNBD(mig, driver, dom) < 0)
        return -1;

    if (flags & QEMU_MIGRATION_COOKIE_STATS &&
        qemuMigrationCookieAddStatistics(mig, dom) < 0)
        return -1;

    if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig)))
        return -1;

    *cookieoutlen = strlen(*cookieout) + 1;

    VIR_DEBUG("cookielen=%d cookie=%s", *cookieoutlen, *cookieout);

    return 0;
}


static qemuMigrationCookiePtr
qemuMigrationEatCookie(virQEMUDriverPtr driver,
                       virDomainObjPtr dom,
                       const char *cookiein,
                       int cookieinlen,
                       unsigned int flags)
{
    qemuMigrationCookiePtr mig = NULL;

    /* Parse & validate incoming cookie (if any) */
    if (cookiein && cookieinlen &&
        cookiein[cookieinlen-1] != '\0') {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Migration cookie was not NULL terminated"));
        goto error;
    }

    VIR_DEBUG("cookielen=%d cookie='%s'", cookieinlen, NULLSTR(cookiein));

    if (!(mig = qemuMigrationCookieNew(dom)))
        return NULL;

    if (cookiein && cookieinlen &&
        qemuMigrationCookieXMLParseStr(mig,
                                       driver,
                                       cookiein,
                                       flags) < 0)
        goto error;

    if (mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) {
        if (!mig->lockDriver) {
            if (virLockManagerPluginUsesState(driver->lockManager)) {
                virReportError(VIR_ERR_INTERNAL_ERROR,
                               _("Missing %s lock state for migration cookie"),
                               virLockManagerPluginGetName(driver->lockManager));
                goto error;
            }
        } else if (STRNEQ(mig->lockDriver,
                          virLockManagerPluginGetName(driver->lockManager))) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Source host lock driver %s different from target %s"),
                           mig->lockDriver,
                           virLockManagerPluginGetName(driver->lockManager));
            goto error;
        }
    }

    return mig;

 error:
    qemuMigrationCookieFree(mig);
    return NULL;
}
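
/*
 * Illustrative sketch (not an actual call site in this file): callers in
 * the migration entry points typically parse the peer's cookie and then
 * bake their own reply, along the lines of:
 *
 *     qemuMigrationCookiePtr mig;
 *
 *     if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
 *                                        QEMU_MIGRATION_COOKIE_LOCKSTATE)))
 *         return -1;
 *
 *     if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
 *                                 QEMU_MIGRATION_COOKIE_GRAPHICS |
 *                                 QEMU_MIGRATION_COOKIE_NBD) < 0)
 *         goto cleanup;
 *
 * where 'vm', 'driver' and the cookie buffers come from the surrounding
 * API implementation; the flag combinations shown are examples only.
 */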

static void
qemuMigrationStoreDomainState(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    priv->preMigrationState = virDomainObjGetState(vm, NULL);

    VIR_DEBUG("Storing pre-migration state=%d domain=%p",
              priv->preMigrationState, vm);
}

/* Returns true if the domain was resumed, false otherwise */
static bool
qemuMigrationRestoreDomainState(virConnectPtr conn, virDomainObjPtr vm)
{
    virQEMUDriverPtr driver = conn->privateData;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int state = virDomainObjGetState(vm, NULL);
    bool ret = false;

    VIR_DEBUG("driver=%p, vm=%p, pre-mig-state=%d, state=%d",
              driver, vm, priv->preMigrationState, state);

    if (state == VIR_DOMAIN_PAUSED &&
        priv->preMigrationState == VIR_DOMAIN_RUNNING) {
        /* This is basically the only restore possibility that's safe
         * and that we should attempt */

        VIR_DEBUG("Restoring pre-migration state due to migration error");

        /* we got here through some sort of failure; start the domain again */
        if (qemuProcessStartCPUs(driver, vm, conn,
                                 VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
                                 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
            /* Hm, we already know we are in error here.  We don't want to
             * overwrite the previous error, though, so we just throw something
             * to the logs and hope for the best */
            VIR_ERROR(_("Failed to resume guest %s after failure"), vm->def->name);
            goto cleanup;
        }
        ret = true;
    }

 cleanup:
    priv->preMigrationState = VIR_DOMAIN_NOSTATE;
    return ret;
}

/**
 * qemuMigrationStartNBDServer:
 * @driver: qemu driver
 * @vm: domain
 *
 * Starts the NBD server. This is a newer method of copying
 * storage during migration than using the 'blk' and 'inc'
 * arguments of the 'migrate' monitor command.
 * Errors are reported here.
 *
 * Returns 0 on success, -1 otherwise.
 */
static int
qemuMigrationStartNBDServer(virQEMUDriverPtr driver,
                            virDomainObjPtr vm,
                            const char *listenAddr)
{
    int ret = -1;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned short port = 0;
    char *diskAlias = NULL;
    size_t i;

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];

        /* skip shared, RO and source-less disks */
        if (disk->src->shared || disk->src->readonly ||
            !virDomainDiskGetSource(disk))
            continue;

        VIR_FREE(diskAlias);
        if (virAsprintf(&diskAlias, "%s%s",
                        QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0)
            goto cleanup;

        if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
            goto cleanup;

        if (!port &&
            ((virPortAllocatorAcquire(driver->migrationPorts, &port) < 0) ||
             (qemuMonitorNBDServerStart(priv->mon, listenAddr, port) < 0))) {
            qemuDomainObjExitMonitor(driver, vm);
            goto cleanup;
        }

        if (qemuMonitorNBDServerAdd(priv->mon, diskAlias, true) < 0) {
            qemuDomainObjExitMonitor(driver, vm);
            goto cleanup;
        }
        qemuDomainObjExitMonitor(driver, vm);
    }

    priv->nbdPort = port;
    ret = 0;

 cleanup:
    VIR_FREE(diskAlias);
    if (ret < 0)
        virPortAllocatorRelease(driver->remotePorts, port);
    return ret;
}
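
/*
 * Illustrative note: the exports registered above on the destination are
 * consumed by qemuMigrationDriveMirror() on the source, which points
 * drive-mirror at a destination of the form
 *
 *     nbd:<host>:<port>:exportname=<QEMU_DRIVE_HOST_PREFIX><disk alias>
 *
 * for each non-shared, writable disk that has a source.
 */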

/**
 * qemuMigrationDriveMirror:
 * @driver: qemu driver
 * @vm: domain
 * @mig: migration cookie
 * @host: where we are migrating to
 * @speed: how much the copying should be limited
 * @migrate_flags: migrate monitor command flags
 *
 * Run drive-mirror to feed the NBD server running on the destination
 * and wait until the job switches into the phase where writes go
 * simultaneously to both source and destination. That switch is what
 * we wait for before proceeding with the next disk. On success,
 * update @migrate_flags so that the 'migrate' command is not told to
 * do the very same operation again.
 *
 * Returns 0 on success (@migrate_flags updated),
 *        -1 otherwise.
 */
static int
qemuMigrationDriveMirror(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         qemuMigrationCookiePtr mig,
                         const char *host,
                         unsigned long speed,
                         unsigned int *migrate_flags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    int mon_ret;
    int port;
    size_t i, lastGood = 0;
    char *diskAlias = NULL;
    char *nbd_dest = NULL;
    char *hoststr = NULL;
    unsigned int mirror_flags = VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT;
    virErrorPtr err = NULL;

    if (!(*migrate_flags & (QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
                            QEMU_MONITOR_MIGRATE_NON_SHARED_INC)))
        return 0;

    if (!mig->nbd) {
        /* Destination doesn't support NBD server.
         * Fall back to previous implementation. */
        VIR_DEBUG("Destination doesn't support NBD server. "
                  "Falling back to previous implementation.");
        return 0;
    }

    /* steal NBD port and thus prevent its propagation back to destination */
    port = mig->nbd->port;
    mig->nbd->port = 0;

    /* escape literal IPv6 address */
    if (strchr(host, ':')) {
        if (virAsprintf(&hoststr, "[%s]", host) < 0)
            goto error;
    } else if (VIR_STRDUP(hoststr, host) < 0) {
        goto error;
    }

    if (*migrate_flags & QEMU_MONITOR_MIGRATE_NON_SHARED_INC)
        mirror_flags |= VIR_DOMAIN_BLOCK_REBASE_SHALLOW;

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        virDomainBlockJobInfo info;

        /* skip shared, RO and source-less disks */
        if (disk->src->shared || disk->src->readonly ||
            !virDomainDiskGetSource(disk))
            continue;

        VIR_FREE(diskAlias);
        VIR_FREE(nbd_dest);
        if ((virAsprintf(&diskAlias, "%s%s",
                         QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0) ||
            (virAsprintf(&nbd_dest, "nbd:%s:%d:exportname=%s",
                         hoststr, port, diskAlias) < 0))
            goto error;

        if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            goto error;
        mon_ret = qemuMonitorDriveMirror(priv->mon, diskAlias, nbd_dest,
                                         NULL, speed, 0, 0, mirror_flags);
        qemuDomainObjExitMonitor(driver, vm);

        if (mon_ret < 0)
            goto error;

        lastGood = i;

        /* wait for completion */
        while (true) {
            /* Poll every 500ms for progress & to allow cancellation */
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 500 * 1000 * 1000ull };

            memset(&info, 0, sizeof(info));

            if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                               QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
                goto error;
            if (priv->job.asyncAbort) {
                /* explicitly do this *after* we entered the monitor,
                 * as this is a critical section so we are guaranteed
                 * priv->job.asyncAbort will not change */
                qemuDomainObjExitMonitor(driver, vm);
                virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                               qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                               _("canceled by client"));
                goto error;
            }
            mon_ret = qemuMonitorBlockJobInfo(priv->mon, diskAlias, &info,
                                              NULL);
            qemuDomainObjExitMonitor(driver, vm);

            if (mon_ret < 0)
                goto error;

            if (info.cur == info.end) {
                VIR_DEBUG("Drive mirroring of '%s' completed", diskAlias);
                break;
            }

            /* XXX Frankly speaking, we should listen to the events,
             * instead of doing this. But this works for now and we
             * are doing something similar in migration itself anyway */

            virObjectUnlock(vm);

            nanosleep(&ts, NULL);

            virObjectLock(vm);
        }
    }

    /* Okay, copied. Modify migrate_flags */
    *migrate_flags &= ~(QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
                        QEMU_MONITOR_MIGRATE_NON_SHARED_INC);
    ret = 0;

 cleanup:
    VIR_FREE(diskAlias);
    VIR_FREE(nbd_dest);
    VIR_FREE(hoststr);
    return ret;

 error:
    /* don't overwrite any errors */
    err = virSaveLastError();
    /* cancel any outstanding jobs */
    while (lastGood) {
        virDomainDiskDefPtr disk = vm->def->disks[--lastGood];

        /* skip shared, RO and source-less disks */
        if (disk->src->shared || disk->src->readonly ||
            !virDomainDiskGetSource(disk))
            continue;

        VIR_FREE(diskAlias);
        if (virAsprintf(&diskAlias, "%s%s",
                        QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0)
            continue;
        if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
            if (qemuMonitorBlockJob(priv->mon, diskAlias, NULL, NULL, 0,
                                    BLOCK_JOB_ABORT, true) < 0) {
                VIR_WARN("Unable to cancel block-job on '%s'", diskAlias);
            }
            qemuDomainObjExitMonitor(driver, vm);
        } else {
            VIR_WARN("Unable to enter monitor. No block job cancelled");
        }
    }
    if (err)
        virSetError(err);
    virFreeError(err);
    goto cleanup;
}


static void
qemuMigrationStopNBDServer(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           qemuMigrationCookiePtr mig)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (!mig->nbd)
        return;

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        return;

    if (qemuMonitorNBDServerStop(priv->mon) < 0)
        VIR_WARN("Unable to stop NBD server");

    qemuDomainObjExitMonitor(driver, vm);

    virPortAllocatorRelease(driver->remotePorts, priv->nbdPort);
    priv->nbdPort = 0;
}

static void
qemuMigrationCancelDriveMirror(qemuMigrationCookiePtr mig,
                               virQEMUDriverPtr driver,
                               virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    size_t i;
    char *diskAlias = NULL;

    VIR_DEBUG("mig=%p nbdPort=%d", mig->nbd, priv->nbdPort);

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];

        /* skip shared, RO and source-less disks */
        if (disk->src->shared || disk->src->readonly ||
            !virDomainDiskGetSource(disk))
            continue;

        VIR_FREE(diskAlias);
        if (virAsprintf(&diskAlias, "%s%s",
                        QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0)
            goto cleanup;

        if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            goto cleanup;

        if (qemuMonitorBlockJob(priv->mon, diskAlias, NULL, NULL, 0,
                                BLOCK_JOB_ABORT, true) < 0)
            VIR_WARN("Unable to stop block job on %s", diskAlias);
        qemuDomainObjExitMonitor(driver, vm);
    }

 cleanup:
    VIR_FREE(diskAlias);
    return;
}

/* Validate whether the domain is safe to migrate.  If vm is NULL,
 * then this is being run in the v2 Prepare stage on the destination
 * (where we only have the target xml); if vm is provided, then this
 * is being run in either v2 Perform or v3 Begin (where we also have
 * access to all of the domain's metadata, such as whether it is
 * marked autodestroy or has snapshots).  While it would be nice to
 * assume that checking on source is sufficient to prevent ever
 * talking to the destination in the first place, we are stuck with
 * the fact that older servers did not do checks on the source. */
bool
qemuMigrationIsAllowed(virQEMUDriverPtr driver, virDomainObjPtr vm,
                       virDomainDefPtr def, bool remote, bool abort_on_error)
{
    int nsnapshots;
    int pauseReason;
    bool forbid;
    size_t i;

    if (vm) {
        if (qemuProcessAutoDestroyActive(driver, vm)) {
            virReportError(VIR_ERR_OPERATION_INVALID,
                           "%s", _("domain is marked for auto destroy"));
            return false;
        }

        /* perform these checks only when migrating to remote hosts */
        if (remote) {
            nsnapshots = virDomainSnapshotObjListNum(vm->snapshots, NULL, 0);
            if (nsnapshots < 0)
                return false;

            if (nsnapshots > 0) {
                virReportError(VIR_ERR_OPERATION_INVALID,
                               _("cannot migrate domain with %d snapshots"),
                               nsnapshots);
                return false;
            }

            /* cancel migration if disk I/O error is emitted while migrating */
            if (abort_on_error &&
                virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
                pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
                virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                               _("cannot migrate domain with I/O error"));
                return false;
            }

        }

        if (virDomainHasDiskMirror(vm)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("domain has an active block job"));
            return false;
        }

        def = vm->def;
    }

    /* Migration with USB host devices is allowed, all other devices are
     * forbidden.
     */
    forbid = false;
    for (i = 0; i < def->nhostdevs; i++) {
        virDomainHostdevDefPtr hostdev = def->hostdevs[i];
        if (hostdev->mode != VIR_DOMAIN_HOSTDEV_MODE_SUBSYS ||
            hostdev->source.subsys.type != VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB) {
            forbid = true;
            break;
        }
    }
    if (forbid) {
        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                       _("domain has assigned non-USB host devices"));
        return false;
    }

    for (i = 0; def->cpu && i < def->cpu->nfeatures; i++) {
        virCPUFeatureDefPtr feature = &def->cpu->features[i];

        if (feature->policy != VIR_CPU_FEATURE_REQUIRE)
            continue;

        /* QEMU blocks migration and save with invariant TSC enabled */
        if (STREQ(feature->name, "invtsc")) {
            virReportError(VIR_ERR_OPERATION_INVALID,
                           _("domain has CPU feature: %s"),
                           feature->name);
            return false;
        }
    }

    return true;
}

static bool
qemuMigrationIsSafe(virDomainDefPtr def)
{
    size_t i;

    for (i = 0; i < def->ndisks; i++) {
        virDomainDiskDefPtr disk = def->disks[i];
        const char *src = virDomainDiskGetSource(disk);

        /* Our code elsewhere guarantees shared disks are either readonly (in
         * which case cache mode doesn't matter) or used with cache=none */
        if (src &&
            !disk->src->shared &&
            !disk->src->readonly &&
            disk->cachemode != VIR_DOMAIN_DISK_CACHE_DISABLE) {
            int rc;

            if (virDomainDiskGetType(disk) == VIR_STORAGE_TYPE_FILE) {
                if ((rc = virFileIsSharedFS(src)) < 0)
                    return false;
                else if (rc == 0)
                    continue;
                if ((rc = virStorageFileIsClusterFS(src)) < 0)
                    return false;
                else if (rc == 1)
                    continue;
            } else if (disk->src->type == VIR_STORAGE_TYPE_NETWORK &&
                       disk->src->protocol == VIR_STORAGE_NET_PROTOCOL_RBD) {
                continue;
            }

            virReportError(VIR_ERR_MIGRATE_UNSAFE, "%s",
                           _("Migration may lead to data corruption if disks"
                             " use cache != none"));
            return false;
        }
    }

    return true;
}
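
/* Illustrative sketch (not compiled): callers on the source side combine the
 * two checks above before contacting the destination, roughly as done later
 * in this file:
 *
 *     if (!qemuMigrationIsAllowed(driver, vm, NULL, true, abort_on_error))
 *         goto cleanup;
 *     if (!(flags & VIR_MIGRATE_UNSAFE) && !qemuMigrationIsSafe(vm->def))
 *         goto cleanup;
 *
 * Note that VIR_MIGRATE_UNSAFE only skips qemuMigrationIsSafe(); there is no
 * flag here that bypasses qemuMigrationIsAllowed().
 */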

/** qemuMigrationSetOffline
 * Pause domain for non-live migration.
 */
int
qemuMigrationSetOffline(virQEMUDriverPtr driver,
                        virDomainObjPtr vm)
{
    int ret;
    VIR_DEBUG("driver=%p vm=%p", driver, vm);
    ret = qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION,
                              QEMU_ASYNC_JOB_MIGRATION_OUT);
    if (ret == 0) {
        virObjectEventPtr event;

        event = virDomainEventLifecycleNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_SUSPENDED,
                                         VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED);
        if (event)
            qemuDomainEventQueue(driver, event);
    }

    return ret;
}


static int
qemuMigrationSetCompression(virQEMUDriverPtr driver,
                            virDomainObjPtr vm,
                            qemuDomainAsyncJob job)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, job) < 0)
        return -1;

    ret = qemuMonitorGetMigrationCapability(
                priv->mon,
                QEMU_MONITOR_MIGRATION_CAPS_XBZRLE);

    if (ret < 0) {
        goto cleanup;
    } else if (ret == 0) {
        if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                           _("Compressed migration is not supported by "
                             "target QEMU binary"));
        } else {
            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                           _("Compressed migration is not supported by "
                             "source QEMU binary"));
        }
        ret = -1;
        goto cleanup;
    }

    ret = qemuMonitorSetMigrationCapability(
                priv->mon,
                QEMU_MONITOR_MIGRATION_CAPS_XBZRLE);

 cleanup:
    qemuDomainObjExitMonitor(driver, vm);
    return ret;
}
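
/* Illustrative sketch (not compiled): the helper above follows the usual
 * probe-then-enable pattern (query the capability first, then set it).
 * Later in this file it is only called when compression was requested:
 *
 *     if (flags & VIR_MIGRATE_COMPRESSED &&
 *         qemuMigrationSetCompression(driver, vm,
 *                                     QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
 *         goto stop;
 */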

static int
qemuMigrationSetAutoConverge(virQEMUDriverPtr driver,
                             virDomainObjPtr vm,
                             qemuDomainAsyncJob job)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, job) < 0)
        return -1;

    ret = qemuMonitorGetMigrationCapability(
                priv->mon,
                QEMU_MONITOR_MIGRATION_CAPS_AUTO_CONVERGE);

    if (ret < 0) {
        goto cleanup;
    } else if (ret == 0) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("Auto-Converge is not supported by "
                         "QEMU binary"));
        ret = -1;
        goto cleanup;
    }

    ret = qemuMonitorSetMigrationCapability(
                priv->mon,
                QEMU_MONITOR_MIGRATION_CAPS_AUTO_CONVERGE);

 cleanup:
    qemuDomainObjExitMonitor(driver, vm);
    return ret;
}


1858 1859 1860 1861 1862 1863 1864
static int
qemuMigrationWaitForSpice(virQEMUDriverPtr driver,
                          virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    bool wait_for_spice = false;
    bool spice_migrated = false;
    size_t i = 0;

    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_SEAMLESS_MIGRATION)) {
        for (i = 0; i < vm->def->ngraphics; i++) {
            if (vm->def->graphics[i]->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
                wait_for_spice = true;
                break;
            }
        }
    }

    if (!wait_for_spice)
        return 0;

    while (!spice_migrated) {
        /* Poll every 50ms for progress & to allow cancellation */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };

        if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            return -1;

        if (qemuMonitorGetSpiceMigrationStatus(priv->mon,
                                               &spice_migrated) < 0) {
            qemuDomainObjExitMonitor(driver, vm);
            return -1;
        }
        qemuDomainObjExitMonitor(driver, vm);
        virObjectUnlock(vm);
        nanosleep(&ts, NULL);
        virObjectLock(vm);
    }

    return 0;
}

static int
qemuMigrationUpdateJobStatus(virQEMUDriverPtr driver,
                             virDomainObjPtr vm,
                             const char *job,
                             qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuMonitorMigrationStatus status;
    qemuDomainJobInfoPtr jobInfo;
    int ret;

    memset(&status, 0, sizeof(status));

    ret = qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob);
    if (ret < 0) {
        /* Guest already exited or waiting for the job timed out; nothing
         * further to update. */
        return ret;
    }
    ret = qemuMonitorGetMigrationStatus(priv->mon, &status);

    qemuDomainObjExitMonitor(driver, vm);

    if (ret < 0 ||
        qemuDomainJobInfoUpdateTime(priv->job.current) < 0)
        return -1;

    ret = -1;
    jobInfo = priv->job.current;
    switch (status.status) {
    case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
        jobInfo->type = VIR_DOMAIN_JOB_COMPLETED;
        /* fall through */
    case QEMU_MONITOR_MIGRATION_STATUS_SETUP:
    case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
        ret = 0;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
        jobInfo->type = VIR_DOMAIN_JOB_NONE;
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("%s: %s"), job, _("is not active"));
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
        jobInfo->type = VIR_DOMAIN_JOB_FAILED;
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("%s: %s"), job, _("unexpectedly failed"));
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
        jobInfo->type = VIR_DOMAIN_JOB_CANCELLED;
        virReportError(VIR_ERR_OPERATION_ABORTED,
                       _("%s: %s"), job, _("canceled by client"));
        break;
    }
    jobInfo->status = status;

    return ret;
}


/* Returns 0 on success, -2 when migration needs to be cancelled, or -1 when
 * QEMU reports failed migration.
 */
static int
qemuMigrationWaitForCompletion(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               qemuDomainAsyncJob asyncJob,
                               virConnectPtr dconn,
                               bool abort_on_error)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = priv->job.current;
    const char *job;
    int pauseReason;

    switch (priv->job.asyncJob) {
    case QEMU_ASYNC_JOB_MIGRATION_OUT:
        job = _("migration job");
        break;
    case QEMU_ASYNC_JOB_SAVE:
        job = _("domain save job");
        break;
    case QEMU_ASYNC_JOB_DUMP:
        job = _("domain core dump job");
        break;
    default:
        job = _("job");
    }

    jobInfo->type = VIR_DOMAIN_JOB_UNBOUNDED;

    while (jobInfo->type == VIR_DOMAIN_JOB_UNBOUNDED) {
        /* Poll every 50ms for progress & to allow cancellation */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };

        if (qemuMigrationUpdateJobStatus(driver, vm, job, asyncJob) == -1)
            break;

        /* cancel migration if disk I/O error is emitted while migrating */
        if (abort_on_error &&
            virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
            pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
            virReportError(VIR_ERR_OPERATION_FAILED,
                           _("%s: %s"), job, _("failed due to I/O error"));
            break;
        }

        if (dconn && virConnectIsAlive(dconn) <= 0) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("Lost connection to destination host"));
            break;
        }

        virObjectUnlock(vm);

        nanosleep(&ts, NULL);

        virObjectLock(vm);
    }

    if (jobInfo->type == VIR_DOMAIN_JOB_COMPLETED) {
        qemuDomainJobInfoUpdateDowntime(jobInfo);
        VIR_FREE(priv->job.completed);
        if (VIR_ALLOC(priv->job.completed) == 0)
            *priv->job.completed = *jobInfo;
        return 0;
    } else if (jobInfo->type == VIR_DOMAIN_JOB_UNBOUNDED) {
        /* The migration was aborted by us rather than QEMU itself so let's
         * update the job type and notify the caller to send migrate_cancel.
         */
        jobInfo->type = VIR_DOMAIN_JOB_FAILED;
        return -2;
    } else {
        return -1;
    }
}
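
/* Illustrative sketch (not compiled): callers distinguish the two failure
 * modes of qemuMigrationWaitForCompletion(), roughly:
 *
 *     rc = qemuMigrationWaitForCompletion(driver, vm, asyncJob,
 *                                         dconn, abort_on_error);
 *     if (rc == -2) {
 *         // we aborted the job ourselves; tell QEMU to migrate_cancel
 *     } else if (rc == -1) {
 *         // QEMU itself reported the migration as failed
 *     }
 *
 * The real caller sits in the Perform-phase code later in this file.
 */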


static int
qemuDomainMigrateGraphicsRelocate(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  qemuMigrationCookiePtr cookie,
                                  const char *graphicsuri)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    const char *listenAddress = NULL;
    virSocketAddr addr;
    virURIPtr uri = NULL;
    int type = -1;
    int port = -1;
    int tlsPort = -1;
    const char *tlsSubject = NULL;

    if (!cookie || (!cookie->graphics && !graphicsuri))
        return 0;

    if (graphicsuri && !(uri = virURIParse(graphicsuri)))
        goto cleanup;

    if (cookie->graphics) {
        type = cookie->graphics->type;

        listenAddress = cookie->graphics->listen;

        if (!listenAddress ||
            (virSocketAddrParse(&addr, listenAddress, AF_UNSPEC) > 0 &&
             virSocketAddrIsWildcard(&addr)))
            listenAddress = cookie->remoteHostname;

        port = cookie->graphics->port;
        tlsPort = cookie->graphics->tlsPort;
        tlsSubject = cookie->graphics->tlsSubject;
    }

    if (uri) {
        size_t i;

        if ((type = virDomainGraphicsTypeFromString(uri->scheme)) < 0) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("unknown graphics type %s"), uri->scheme);
            goto cleanup;
        }

        if (uri->server)
            listenAddress = uri->server;
        if (uri->port > 0)
            port = uri->port;

        for (i = 0; i < uri->paramsCount; i++) {
            virURIParamPtr param = uri->params + i;

            if (STRCASEEQ(param->name, "tlsPort")) {
                if (virStrToLong_i(param->value, NULL, 10, &tlsPort) < 0) {
                    virReportError(VIR_ERR_INVALID_ARG,
                                   _("invalid tlsPort number: %s"),
                                   param->value);
                    goto cleanup;
                }
            } else if (STRCASEEQ(param->name, "tlsSubject")) {
                tlsSubject = param->value;
            }
        }
    }

    /* QEMU doesn't support VNC relocation yet, so
     * skip it to avoid generating an error
     */
    if (type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
        ret = 0;
        goto cleanup;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
        ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress,
                                          port, tlsPort, tlsSubject);
        qemuDomainObjExitMonitor(driver, vm);
    }

 cleanup:
    virURIFree(uri);
    return ret;
}
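
/* Illustrative sketch (not compiled): the optional graphicsuri override
 * parsed above is expected to look roughly like
 *
 *     spice://<listen-address>:<port>?tlsPort=<tls-port>&tlsSubject=<subject>
 *
 * where the URI scheme selects the graphics type, server/port override the
 * values received in the cookie, and tlsPort/tlsSubject are optional query
 * parameters (matched case-insensitively above).
 */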


static int
qemuDomainMigrateOPDRelocate(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
                             virDomainObjPtr vm,
                             qemuMigrationCookiePtr cookie)
{
    virDomainNetDefPtr netptr;
    int ret = -1;
    size_t i;

    for (i = 0; i < cookie->network->nnets; i++) {
        netptr = vm->def->nets[i];

        switch (cookie->network->net[i].vporttype) {
        case VIR_NETDEV_VPORT_PROFILE_NONE:
        case VIR_NETDEV_VPORT_PROFILE_8021QBG:
        case VIR_NETDEV_VPORT_PROFILE_8021QBH:
            break;
        case VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH:
            if (virNetDevOpenvswitchSetMigrateData(cookie->network->net[i].portdata,
                                                   netptr->ifname) != 0) {
                virReportError(VIR_ERR_INTERNAL_ERROR,
                               _("Unable to run command to set OVS port data for "
                                 "interface %s"), netptr->ifname);
                goto cleanup;
            }
            break;
        default:
            break;
        }
    }

    ret = 0;
 cleanup:
    return ret;
}


/* This is called for outgoing non-p2p migrations when a connection to the
 * client which initiated the migration was closed but we were waiting for it
 * to follow up with the next phase, that is, in between
 * qemuDomainMigrateBegin3 and qemuDomainMigratePerform3 or
 * qemuDomainMigratePerform3 and qemuDomainMigrateConfirm3.
 */
static virDomainObjPtr
qemuMigrationCleanup(virDomainObjPtr vm,
                     virConnectPtr conn,
                     void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s",
              vm->def->name, conn,
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
              qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
                                              priv->job.phase));

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
        goto cleanup;

    VIR_DEBUG("The connection which started outgoing migration of domain %s"
              " was closed; canceling the migration",
              vm->def->name);

    switch ((qemuMigrationJobPhase) priv->job.phase) {
    case QEMU_MIGRATION_PHASE_BEGIN3:
        /* just forget we were about to migrate */
        qemuDomainObjDiscardAsyncJob(driver, vm);
        break;

    case QEMU_MIGRATION_PHASE_PERFORM3_DONE:
        VIR_WARN("Migration of domain %s finished but we don't know if the"
                 " domain was successfully started on destination or not",
                 vm->def->name);
        /* clear the job and let higher levels decide what to do */
        qemuDomainObjDiscardAsyncJob(driver, vm);
        break;

    case QEMU_MIGRATION_PHASE_PERFORM3:
        /* cannot be seen without an active migration API; unreachable */
    case QEMU_MIGRATION_PHASE_CONFIRM3:
    case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
        /* all done; unreachable */
    case QEMU_MIGRATION_PHASE_PREPARE:
    case QEMU_MIGRATION_PHASE_FINISH2:
    case QEMU_MIGRATION_PHASE_FINISH3:
        /* incoming migration; unreachable */
    case QEMU_MIGRATION_PHASE_PERFORM2:
        /* single phase outgoing migration; unreachable */
    case QEMU_MIGRATION_PHASE_NONE:
    case QEMU_MIGRATION_PHASE_LAST:
        /* unreachable */
        ;
    }

 cleanup:
    return vm;
}


/* The caller is supposed to lock the vm and start a migration job. */
static char
*qemuMigrationBeginPhase(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         const char *xmlin,
                         const char *dname,
                         char **cookieout,
                         int *cookieoutlen,
                         unsigned long flags)
{
    char *rv = NULL;
    qemuMigrationCookiePtr mig = NULL;
    virDomainDefPtr def = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virCapsPtr caps = NULL;
    unsigned int cookieFlags = QEMU_MIGRATION_COOKIE_LOCKSTATE;
    bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);

    VIR_DEBUG("driver=%p, vm=%p, xmlin=%s, dname=%s,"
              " cookieout=%p, cookieoutlen=%p, flags=%lx",
              driver, vm, NULLSTR(xmlin), NULLSTR(dname),
              cookieout, cookieoutlen, flags);

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    /* Only set the phase if we are inside QEMU_ASYNC_JOB_MIGRATION_OUT.
     * Otherwise we will start the async job later in the perform phase losing
     * change protection.
     */
    if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
        qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_BEGIN3);

    if (!qemuMigrationIsAllowed(driver, vm, NULL, true, abort_on_error))
        goto cleanup;

    if (!(flags & VIR_MIGRATE_UNSAFE) && !qemuMigrationIsSafe(vm->def))
        goto cleanup;

    if (flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC) &&
        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DRIVE_MIRROR)) {
        /* TODO support NBD for TUNNELLED migration */
        if (flags & VIR_MIGRATE_TUNNELLED) {
            VIR_WARN("NBD in tunnelled migration is currently not supported");
        } else {
            cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
            priv->nbdPort = 0;
        }
    }

    if (!(mig = qemuMigrationEatCookie(driver, vm, NULL, 0, 0)))
        goto cleanup;

    if (qemuMigrationBakeCookie(mig, driver, vm,
                                cookieout, cookieoutlen,
                                cookieFlags) < 0)
        goto cleanup;

    if (flags & VIR_MIGRATE_OFFLINE) {
        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
                     VIR_MIGRATE_NON_SHARED_INC)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration cannot handle "
                             "non-shared storage"));
            goto cleanup;
        }
        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration must be specified with "
                             "the persistent flag set"));
            goto cleanup;
        }
        if (flags & VIR_MIGRATE_TUNNELLED) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("tunnelled offline migration does not "
                             "make sense"));
            goto cleanup;
        }
    }

    if (xmlin) {
        if (!(def = virDomainDefParseString(xmlin, caps, driver->xmlopt,
                                            QEMU_EXPECTED_VIRT_TYPES,
                                            VIR_DOMAIN_XML_INACTIVE)))
            goto cleanup;

        if (!qemuDomainDefCheckABIStability(driver, vm->def, def))
            goto cleanup;

        rv = qemuDomainDefFormatLive(driver, def, false, true);
    } else {
        rv = qemuDomainDefFormatLive(driver, vm->def, false, true);
    }

 cleanup:
    qemuMigrationCookieFree(mig);
    virObjectUnref(caps);
    virDomainDefFree(def);
    return rv;
}

char *
qemuMigrationBegin(virConnectPtr conn,
                   virDomainObjPtr vm,
                   const char *xmlin,
                   const char *dname,
                   char **cookieout,
                   int *cookieoutlen,
                   unsigned long flags)
{
    virQEMUDriverPtr driver = conn->privateData;
    char *xml = NULL;
    qemuDomainAsyncJob asyncJob;

    if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            goto cleanup;
        asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT;
    } else {
        if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
            goto cleanup;
        asyncJob = QEMU_ASYNC_JOB_NONE;
    }

    qemuMigrationStoreDomainState(vm);

    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("domain is not running"));
        goto endjob;
    }

    /* Check if there is any ejected media.
     * We don't want to require them on the destination.
     */
    if (!(flags & VIR_MIGRATE_OFFLINE) &&
        qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
        goto endjob;

    if (!(xml = qemuMigrationBeginPhase(driver, vm, xmlin, dname,
                                        cookieout, cookieoutlen,
                                        flags)))
        goto endjob;

    if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        /* We keep the job active across API calls until the confirm() call.
         * This prevents any other APIs being invoked while migration is taking
         * place.
         */
        if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
                                 qemuMigrationCleanup) < 0)
            goto endjob;
        if (qemuMigrationJobContinue(vm) == 0) {
            vm = NULL;
            virReportError(VIR_ERR_OPERATION_FAILED,
                           "%s", _("domain disappeared"));
            VIR_FREE(xml);
            if (cookieout)
                VIR_FREE(*cookieout);
        }
    } else {
        goto endjob;
    }

 cleanup:
    if (vm)
        virObjectUnlock(vm);
    return xml;

 endjob:
    if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        if (qemuMigrationJobFinish(driver, vm) == 0)
            vm = NULL;
    } else {
        if (!qemuDomainObjEndJob(driver, vm))
            vm = NULL;
    }
    goto cleanup;
}


/* Prepare is the first step, and it runs on the destination host.
 */

static void
qemuMigrationPrepareCleanup(virQEMUDriverPtr driver,
                            virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    VIR_DEBUG("driver=%p, vm=%s, job=%s, asyncJob=%s",
              driver,
              vm->def->name,
              qemuDomainJobTypeToString(priv->job.active),
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob));

    virPortAllocatorRelease(driver->migrationPorts, priv->migrationPort);
    priv->migrationPort = 0;

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN))
        return;
    qemuDomainObjDiscardAsyncJob(driver, vm);
}

static int
qemuMigrationPrepareAny(virQEMUDriverPtr driver,
                        virConnectPtr dconn,
                        const char *cookiein,
                        int cookieinlen,
                        char **cookieout,
                        int *cookieoutlen,
                        virDomainDefPtr *def,
                        const char *origname,
                        virStreamPtr st,
                        unsigned short port,
                        bool autoPort,
                        const char *listenAddress,
                        unsigned long flags)
{
    virDomainObjPtr vm = NULL;
    virObjectEventPtr event = NULL;
    int ret = -1;
    int dataFD[2] = { -1, -1 };
    qemuDomainObjPrivatePtr priv = NULL;
    unsigned long long now;
    qemuMigrationCookiePtr mig = NULL;
    bool tunnel = !!st;
    char *xmlout = NULL;
    unsigned int cookieFlags;
    virCapsPtr caps = NULL;
    char *migrateFrom = NULL;
    bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);
    bool taint_hook = false;

    if (virTimeMillisNow(&now) < 0)
        return -1;

    if (flags & VIR_MIGRATE_OFFLINE) {
        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
                     VIR_MIGRATE_NON_SHARED_INC)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration cannot handle "
                             "non-shared storage"));
            goto cleanup;
        }
        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration must be specified with "
                             "the persistent flag set"));
            goto cleanup;
        }
        if (tunnel) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("tunnelled offline migration does not "
                             "make sense"));
            goto cleanup;
        }
    }

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    if (!qemuMigrationIsAllowed(driver, NULL, *def, true, abort_on_error))
        goto cleanup;

    /* Let migration hook filter domain XML */
    if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
        char *xml;
        int hookret;

        if (!(xml = qemuDomainDefFormatXML(driver, *def,
                                           VIR_DOMAIN_XML_SECURE |
                                           VIR_DOMAIN_XML_MIGRATABLE)))
            goto cleanup;

        hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, (*def)->name,
                              VIR_HOOK_QEMU_OP_MIGRATE, VIR_HOOK_SUBOP_BEGIN,
                              NULL, xml, &xmlout);
        VIR_FREE(xml);

        if (hookret < 0) {
            goto cleanup;
        } else if (hookret == 0) {
            if (!*xmlout) {
                VIR_DEBUG("Migrate hook filter returned nothing; using the"
                          " original XML");
            } else {
                virDomainDefPtr newdef;

                VIR_DEBUG("Using hook-filtered domain XML: %s", xmlout);
                newdef = virDomainDefParseString(xmlout, caps, driver->xmlopt,
                                                 QEMU_EXPECTED_VIRT_TYPES,
                                                 VIR_DOMAIN_XML_INACTIVE);
                if (!newdef)
                    goto cleanup;

                if (!qemuDomainDefCheckABIStability(driver, *def, newdef)) {
                    virDomainDefFree(newdef);
                    goto cleanup;
                }

                virDomainDefFree(*def);
                *def = newdef;
                /* We should taint the domain here. However, @vm and therefore
                 * privateData too are still NULL, so just notice the fact and
                 * taint it later. */
                taint_hook = true;
            }
        }
    }

    if (tunnel) {
        /* QEMU will be started with -incoming stdio
         * (which qemu_command might convert to exec:cat or fd:n)
         */
        if (VIR_STRDUP(migrateFrom, "stdio") < 0)
            goto cleanup;
    } else {
        virSocketAddr listenAddressSocket;
        bool encloseAddress = false;
        bool hostIPv6Capable = false;
        bool qemuIPv6Capable = false;
        virQEMUCapsPtr qemuCaps = NULL;
        struct addrinfo *info = NULL;
        struct addrinfo hints = { .ai_flags = AI_ADDRCONFIG,
                                  .ai_socktype = SOCK_STREAM };

        if (getaddrinfo("::", NULL, &hints, &info) == 0) {
            freeaddrinfo(info);
            hostIPv6Capable = true;
        }
        if (!(qemuCaps = virQEMUCapsCacheLookupCopy(driver->qemuCapsCache,
                                                    (*def)->emulator)))
            goto cleanup;

        qemuIPv6Capable = virQEMUCapsGet(qemuCaps, QEMU_CAPS_IPV6_MIGRATION);
        virObjectUnref(qemuCaps);

        if (listenAddress) {
            if (virSocketAddrIsNumeric(listenAddress)) {
                /* listenAddress is numeric IPv4 or IPv6 */
                if (virSocketAddrParse(&listenAddressSocket, listenAddress, AF_UNSPEC) < 0)
                    goto cleanup;

                /* address parsed successfully */
                if (VIR_SOCKET_ADDR_IS_FAMILY(&listenAddressSocket, AF_INET6)) {
                    if (!qemuIPv6Capable) {
                        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                                       _("qemu isn't capable of IPv6"));
                        goto cleanup;
                    }
                    if (!hostIPv6Capable) {
                        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                                       _("host isn't capable of IPv6"));
                        goto cleanup;
                    }
                    /* IPv6 address must be escaped in brackets on the cmd line */
                    encloseAddress = true;
                }
            } else {
                /* listenAddress is a hostname */
            }
        } else {
            /* Listen on :: instead of 0.0.0.0 if QEMU understands it
             * and there is at least one IPv6 address configured
             */
            listenAddress = qemuIPv6Capable && hostIPv6Capable ?
                encloseAddress = true, "::" : "0.0.0.0";
        }

        /* QEMU will be started with -incoming [<IPv6 addr>]:port,
         * -incoming <IPv4 addr>:port or -incoming <hostname>:port
         */
        if ((encloseAddress &&
             virAsprintf(&migrateFrom, "tcp:[%s]:%d", listenAddress, port) < 0) ||
            (!encloseAddress &&
             virAsprintf(&migrateFrom, "tcp:%s:%d", listenAddress, port) < 0))
            goto cleanup;
    }

    if (!(vm = virDomainObjListAdd(driver->domains, *def,
                                   driver->xmlopt,
                                   VIR_DOMAIN_OBJ_LIST_ADD_LIVE |
                                   VIR_DOMAIN_OBJ_LIST_ADD_CHECK_LIVE,
                                   NULL)))
        goto cleanup;

    *def = NULL;
    priv = vm->privateData;
    if (VIR_STRDUP(priv->origname, origname) < 0)
        goto cleanup;

    if (taint_hook) {
        /* Domain XML has been altered by a hook script. */
        priv->hookRun = true;
    }

    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_LOCKSTATE |
                                       QEMU_MIGRATION_COOKIE_NBD)))
        goto cleanup;

    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        goto cleanup;
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PREPARE);

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    if (flags & VIR_MIGRATE_OFFLINE)
        goto done;

    if (tunnel &&
        (pipe(dataFD) < 0 || virSetCloseExec(dataFD[1]) < 0)) {
        virReportSystemError(errno, "%s",
                             _("cannot create pipe for tunnelled migration"));
        goto endjob;
    }

    /* Start the QEMU daemon, with the same command-line arguments plus
     * -incoming $migrateFrom
     */
    if (qemuProcessStart(dconn, driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                         migrateFrom, dataFD[0], NULL, NULL,
                         VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
                         VIR_QEMU_PROCESS_START_PAUSED |
                         VIR_QEMU_PROCESS_START_AUTODESTROY) < 0) {
        virDomainAuditStart(vm, "migrated", false);
        goto endjob;
    }

    if (tunnel) {
        if (virFDStreamOpen(st, dataFD[1]) < 0) {
            virReportSystemError(errno, "%s",
                                 _("cannot pass pipe for tunnelled migration"));
            goto stop;
        }
        dataFD[1] = -1; /* 'st' owns the FD now & will close it */
    }

    if (flags & VIR_MIGRATE_COMPRESSED &&
        qemuMigrationSetCompression(driver, vm,
                                    QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        goto stop;

    if (mig->lockState) {
        VIR_DEBUG("Received lockstate %s", mig->lockState);
        VIR_FREE(priv->lockState);
        priv->lockState = mig->lockState;
        mig->lockState = NULL;
    } else {
        VIR_DEBUG("Received no lockstate");
    }

 done:
    if (flags & VIR_MIGRATE_OFFLINE)
        cookieFlags = 0;
    else
        cookieFlags = QEMU_MIGRATION_COOKIE_GRAPHICS;

    if (mig->nbd &&
        flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC) &&
        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_SERVER)) {
        if (qemuMigrationStartNBDServer(driver, vm, listenAddress) < 0) {
            /* error already reported */
            goto endjob;
        }
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout,
                                cookieoutlen, cookieFlags) < 0) {
        /* We could tear down the whole guest here, but
         * cookie data is (so far) non-critical, so that
         * seems a little harsh. We'll just warn for now.
         */
        VIR_WARN("Unable to encode migration cookie");
    }

    if (qemuDomainCleanupAdd(vm, qemuMigrationPrepareCleanup) < 0)
        goto endjob;

    if (!(flags & VIR_MIGRATE_OFFLINE)) {
        virDomainAuditStart(vm, "migrated", true);
        event = virDomainEventLifecycleNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STARTED,
                                         VIR_DOMAIN_EVENT_STARTED_MIGRATED);
    }

    /* We keep the job active across API calls until the finish() call.
     * This prevents any other APIs being invoked while incoming
     * migration is taking place.
     */
    if (!qemuMigrationJobContinue(vm)) {
        vm = NULL;
        virReportError(VIR_ERR_OPERATION_FAILED,
                       "%s", _("domain disappeared"));
        goto cleanup;
    }

    if (autoPort)
        priv->migrationPort = port;
    ret = 0;

 cleanup:
    VIR_FREE(migrateFrom);
    VIR_FREE(xmlout);
    VIR_FORCE_CLOSE(dataFD[0]);
    VIR_FORCE_CLOSE(dataFD[1]);
    if (vm) {
        if (ret < 0) {
            virPortAllocatorRelease(driver->remotePorts, priv->nbdPort);
            priv->nbdPort = 0;
        }
        if (ret >= 0 || vm->persistent)
            virObjectUnlock(vm);
        else
            qemuDomainRemoveInactive(driver, vm);
    }
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuMigrationCookieFree(mig);
    virObjectUnref(caps);
    return ret;

 stop:
    virDomainAuditStart(vm, "migrated", false);
    qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);

 endjob:
    if (!qemuMigrationJobFinish(driver, vm)) {
        vm = NULL;
    }
    goto cleanup;
}


/*
 * This version starts an empty VM listening on a localhost TCP port, and
 * sets up the corresponding virStream to handle the incoming data.
 */
int
qemuMigrationPrepareTunnel(virQEMUDriverPtr driver,
                           virConnectPtr dconn,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           virStreamPtr st,
                           virDomainDefPtr *def,
                           const char *origname,
                           unsigned long flags)
{
    int ret;

    VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, st=%p, def=%p, "
              "origname=%s, flags=%lx",
              driver, dconn, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, st, *def, origname, flags);

    if (st == NULL) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("tunnelled migration requested but NULL stream passed"));
        return -1;
    }

    ret = qemuMigrationPrepareAny(driver, dconn, cookiein, cookieinlen,
                                  cookieout, cookieoutlen, def, origname,
                                  st, 0, false, NULL, flags);
    return ret;
}


int
qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
                           virConnectPtr dconn,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           const char *uri_in,
                           char **uri_out,
                           virDomainDefPtr *def,
                           const char *origname,
                           const char *listenAddress,
                           unsigned long flags)
{
    unsigned short port = 0;
    bool autoPort = true;
    char *hostname = NULL;
    const char *p;
    char *uri_str = NULL;
    int ret = -1;
    virURIPtr uri = NULL;
    bool well_formed_uri = true;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    const char *migrateHost = cfg->migrateHost;

    VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, uri_in=%s, uri_out=%p, "
              "def=%p, origname=%s, listenAddress=%s, flags=%lx",
              driver, dconn, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, NULLSTR(uri_in), uri_out,
              *def, origname, NULLSTR(listenAddress), flags);

    *uri_out = NULL;

    /* The URI passed in may be NULL or a string "tcp://somehostname:port".
     *
     * If the URI passed in is NULL then we allocate a port number
     * from our pool of port numbers, and if the migrateHost is configured,
     * we return a URI of "tcp://migrateHost:port", otherwise return a URI
     * of "tcp://ourhostname:port".
     *
     * If the URI passed in is not NULL then we try to parse out the
     * port number and use that (note that the hostname is assumed
     * to be a correct hostname which refers to the target machine).
     */
    if (uri_in == NULL) {
        if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
            goto cleanup;

        if (migrateHost != NULL) {
            if (virSocketAddrIsNumeric(migrateHost) &&
                virSocketAddrParse(NULL, migrateHost, AF_UNSPEC) < 0)
                goto cleanup;

           if (VIR_STRDUP(hostname, migrateHost) < 0)
                goto cleanup;
        } else {
            if ((hostname = virGetHostname()) == NULL)
                goto cleanup;
        }

        if (STRPREFIX(hostname, "localhost")) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("hostname on destination resolved to localhost,"
                             " but migration requires an FQDN"));
            goto cleanup;
        }

        /* XXX this really should have been a properly well-formed
         * URI, but we can't add in tcp:// now without breaking
         * compatibility with old targets. We at least make the
         * new targets accept both syntaxes though.
         */
        /* Caller frees */
        if (virAsprintf(uri_out, "tcp:%s:%d", hostname, port) < 0)
            goto cleanup;
    } else {
        /* Check the URI starts with "tcp:".  We will escape the
         * URI when passing it to the qemu monitor, so bad
         * characters in hostname part don't matter.
         */
        if (!(p = STRSKIP(uri_in, "tcp:"))) {
            virReportError(VIR_ERR_INVALID_ARG, "%s",
                           _("only tcp URIs are supported for KVM/QEMU"
                             " migrations"));
            goto cleanup;
        }

        /* Convert uri_in to well-formed URI with // after tcp: */
        if (!(STRPREFIX(uri_in, "tcp://"))) {
            well_formed_uri = false;
            if (virAsprintf(&uri_str, "tcp://%s", p) < 0)
                goto cleanup;
        }

        uri = virURIParse(uri_str ? uri_str : uri_in);
        VIR_FREE(uri_str);

        if (uri == NULL) {
            virReportError(VIR_ERR_INVALID_ARG, _("unable to parse URI: %s"),
                           uri_in);
            goto cleanup;
        }

        if (uri->server == NULL) {
            virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration"
                                                  " URI: %s"), uri_in);
            goto cleanup;
        }

        if (uri->port == 0) {
            if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
                goto cleanup;

            if (well_formed_uri) {
                uri->port = port;

                /* Caller frees */
                if (!(*uri_out = virURIFormat(uri)))
                    goto cleanup;
            } else {
                /* Caller frees */
                if (virAsprintf(uri_out, "%s:%d", uri_in, port) < 0)
                    goto cleanup;
            }
2928 2929

        } else {
2930 2931
            port = uri->port;
            autoPort = false;
2932 2933 2934 2935 2936 2937
        }
    }

    if (*uri_out)
        VIR_DEBUG("Generated uri_out=%s", *uri_out);

2938
    ret = qemuMigrationPrepareAny(driver, dconn, cookiein, cookieinlen,
2939
                                  cookieout, cookieoutlen, def, origname,
2940
                                  NULL, port, autoPort, listenAddress, flags);
2941
 cleanup:
2942
    virURIFree(uri);
2943
    VIR_FREE(hostname);
2944
    virObjectUnref(cfg);
2945
    if (ret != 0) {
2946
        VIR_FREE(*uri_out);
2947 2948 2949
        if (autoPort)
            virPortAllocatorRelease(driver->migrationPorts, port);
    }
2950 2951 2952 2953
    return ret;
}


2954 2955 2956
virDomainDefPtr
qemuMigrationPrepareDef(virQEMUDriverPtr driver,
                        const char *dom_xml,
2957 2958
                        const char *dname,
                        char **origname)
2959 2960 2961
{
    virCapsPtr caps = NULL;
    virDomainDefPtr def;
2962
    char *name = NULL;
2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978

    if (!dom_xml) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("no domain XML passed"));
        return NULL;
    }

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        return NULL;

    if (!(def = virDomainDefParseString(dom_xml, caps, driver->xmlopt,
                                        QEMU_EXPECTED_VIRT_TYPES,
                                        VIR_DOMAIN_XML_INACTIVE)))
        goto cleanup;

    if (dname) {
2979
        name = def->name;
2980 2981 2982 2983 2984 2985
        if (VIR_STRDUP(def->name, dname) < 0) {
            virDomainDefFree(def);
            def = NULL;
        }
    }

 cleanup:
    virObjectUnref(caps);
    if (def && origname)
        *origname = name;
    else
        VIR_FREE(name);
    return def;
}


static int
qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
                          virConnectPtr conn,
                          virDomainObjPtr vm,
                          const char *cookiein,
                          int cookieinlen,
                          unsigned int flags,
                          int retcode)
{
    qemuMigrationCookiePtr mig;
    virObjectEventPtr event = NULL;
    int rv = -1;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

    VIR_DEBUG("driver=%p, conn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "flags=%x, retcode=%d",
              driver, conn, vm, NULLSTR(cookiein), cookieinlen,
              flags, retcode);

    virCheckFlags(QEMU_MIGRATION_FLAGS, -1);

    qemuMigrationJobSetPhase(driver, vm,
                             retcode == 0
                             ? QEMU_MIGRATION_PHASE_CONFIRM3
                             : QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED);

3022 3023
    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_STATS)))
3024 3025
        goto cleanup;

3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042
    /* Update total times with the values sent by the destination daemon */
    if (mig->jobInfo) {
        qemuDomainObjPrivatePtr priv = vm->privateData;
        if (priv->job.completed) {
            qemuDomainJobInfoPtr jobInfo = priv->job.completed;
            if (mig->jobInfo->status.downtime_set) {
                jobInfo->status.downtime = mig->jobInfo->status.downtime;
                jobInfo->status.downtime_set = true;
            }
            if (mig->jobInfo->timeElapsed)
                jobInfo->timeElapsed = mig->jobInfo->timeElapsed;
        } else {
            priv->job.completed = mig->jobInfo;
            mig->jobInfo = NULL;
        }
    }

3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057
    if (flags & VIR_MIGRATE_OFFLINE)
        goto done;

    /* Did the migration go as planned?  If yes, kill off the
     * domain object, but if no, resume CPUs
     */
    if (retcode == 0) {
        /* If guest uses SPICE and supports seamless migration we have to hold
         * up domain shutdown until SPICE server transfers its data */
        qemuMigrationWaitForSpice(driver, vm);

        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
                        VIR_QEMU_PROCESS_STOP_MIGRATED);
        virDomainAuditStop(vm, "migrated");

3058
        event = virDomainEventLifecycleNewFromObj(vm,
3059 3060 3061 3062 3063 3064 3065
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
    } else {

        /* cancel any outstanding NBD jobs */
        qemuMigrationCancelDriveMirror(mig, driver, vm);

3066 3067 3068 3069
        if (qemuMigrationRestoreDomainState(conn, vm)) {
            event = virDomainEventLifecycleNewFromObj(vm,
                                                      VIR_DOMAIN_EVENT_RESUMED,
                                                      VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
3070 3071 3072 3073 3074 3075 3076 3077
        }

        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) {
            VIR_WARN("Failed to save status on vm %s", vm->def->name);
            goto cleanup;
        }
    }

3078
 done:
3079 3080 3081
    qemuMigrationCookieFree(mig);
    rv = 0;

3082
 cleanup:
3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097
    if (event)
        qemuDomainEventQueue(driver, event);
    virObjectUnref(cfg);
    return rv;
}
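/* qemuMigrationConfirmPhase() above is shared by both confirm paths: the
 * separate Confirm step driven by the client (qemuMigrationConfirm below)
 * and doPeer2PeerMigrate3(), which invokes it directly once Finish3 has
 * completed. */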

int
qemuMigrationConfirm(virConnectPtr conn,
                     virDomainObjPtr vm,
                     const char *cookiein,
                     int cookieinlen,
                     unsigned int flags,
                     int cancelled)
{
    virQEMUDriverPtr driver = conn->privateData;
    qemuMigrationJobPhase phase;
    virQEMUDriverConfigPtr cfg = NULL;
    int ret = -1;

    cfg = virQEMUDriverGetConfig(driver);

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
        goto cleanup;

    if (cancelled)
        phase = QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED;
    else
        phase = QEMU_MIGRATION_PHASE_CONFIRM3;

    qemuMigrationJobStartPhase(driver, vm, phase);
    virCloseCallbacksUnset(driver->closeCallbacks, vm,
                           qemuMigrationCleanup);

    ret = qemuMigrationConfirmPhase(driver, conn, vm,
                                    cookiein, cookieinlen,
                                    flags, cancelled);

    if (qemuMigrationJobFinish(driver, vm) == 0) {
        vm = NULL;
    } else if (!virDomainObjIsActive(vm) &&
               (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) {
        if (flags & VIR_MIGRATE_UNDEFINE_SOURCE)
            virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
        qemuDomainRemoveInactive(driver, vm);
        vm = NULL;
    }

 cleanup:
    if (vm)
        virObjectUnlock(vm);
    virObjectUnref(cfg);
    return ret;
}


enum qemuMigrationDestinationType {
    MIGRATION_DEST_HOST,
    MIGRATION_DEST_CONNECT_HOST,
    MIGRATION_DEST_UNIX,
    MIGRATION_DEST_FD,
};

enum qemuMigrationForwardType {
    MIGRATION_FWD_DIRECT,
    MIGRATION_FWD_STREAM,
};

typedef struct _qemuMigrationSpec qemuMigrationSpec;
typedef qemuMigrationSpec *qemuMigrationSpecPtr;
struct _qemuMigrationSpec {
    enum qemuMigrationDestinationType destType;
    union {
        struct {
            const char *name;
            int port;
        } host;

        struct {
            char *file;
            int sock;
        } unix_socket;

        struct {
            int qemu;
            int local;
        } fd;
    } dest;

    enum qemuMigrationForwardType fwdType;
    union {
        virStreamPtr stream;
    } fwd;
};
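/* How these combinations are used by the code below: doNativeMigrate()
 * picks MIGRATION_DEST_HOST or MIGRATION_DEST_CONNECT_HOST together with
 * MIGRATION_FWD_DIRECT, so qemu talks to the destination itself, while
 * doTunnelMigrate() picks MIGRATION_DEST_FD or MIGRATION_DEST_UNIX with
 * MIGRATION_FWD_STREAM, so whatever qemu writes locally is read back by
 * libvirtd and relayed over a virStreamPtr by the IO thread below. */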

#define TUNNEL_SEND_BUF_SIZE 65536

typedef struct _qemuMigrationIOThread qemuMigrationIOThread;
typedef qemuMigrationIOThread *qemuMigrationIOThreadPtr;
struct _qemuMigrationIOThread {
    virThread thread;
    virStreamPtr st;
    int sock;
    virError err;
    int wakeupRecvFD;
    int wakeupSendFD;
};
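/* The wakeup pipe is the only control channel into the IO thread:
 * qemuMigrationStopTunnel() writes a single byte, 1 to request an abort
 * (virStreamAbort) or 0 to request a graceful finish, after which the
 * thread polls without blocking and treats a drained migration fd like
 * EOF before calling virStreamFinish(). */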

static void qemuMigrationIOFunc(void *arg)
{
    qemuMigrationIOThreadPtr data = arg;
    char *buffer = NULL;
    struct pollfd fds[2];
    int timeout = -1;
    virErrorPtr err = NULL;

    VIR_DEBUG("Running migration tunnel; stream=%p, sock=%d",
              data->st, data->sock);

    if (VIR_ALLOC_N(buffer, TUNNEL_SEND_BUF_SIZE) < 0)
        goto abrt;

    fds[0].fd = data->sock;
    fds[1].fd = data->wakeupRecvFD;

    for (;;) {
        int ret;

        fds[0].events = fds[1].events = POLLIN;
        fds[0].revents = fds[1].revents = 0;

        ret = poll(fds, ARRAY_CARDINALITY(fds), timeout);

        if (ret < 0) {
            if (errno == EAGAIN || errno == EINTR)
                continue;
            virReportSystemError(errno, "%s",
                                 _("poll failed in migration tunnel"));
            goto abrt;
        }

        if (ret == 0) {
            /* We were asked to gracefully stop but reading would block. This
             * can only happen if qemu told us migration finished but didn't
             * close the migration fd. We handle this in the same way as EOF.
             */
            VIR_DEBUG("QEMU forgot to close migration fd");
            break;
        }

        if (fds[1].revents & (POLLIN | POLLERR | POLLHUP)) {
            char stop = 0;

            if (saferead(data->wakeupRecvFD, &stop, 1) != 1) {
                virReportSystemError(errno, "%s",
                                     _("failed to read from wakeup fd"));
                goto abrt;
            }

            VIR_DEBUG("Migration tunnel was asked to %s",
                      stop ? "abort" : "finish");
            if (stop) {
                goto abrt;
            } else {
                timeout = 0;
            }
        }

        if (fds[0].revents & (POLLIN | POLLERR | POLLHUP)) {
            int nbytes;

            nbytes = saferead(data->sock, buffer, TUNNEL_SEND_BUF_SIZE);
            if (nbytes > 0) {
                if (virStreamSend(data->st, buffer, nbytes) < 0)
                    goto error;
            } else if (nbytes < 0) {
                virReportSystemError(errno, "%s",
                        _("tunnelled migration failed to read from qemu"));
                goto abrt;
            } else {
                /* EOF; get out of here */
                break;
            }
        }
    }

    if (virStreamFinish(data->st) < 0)
        goto error;

    VIR_FREE(buffer);

    return;

 abrt:
    err = virSaveLastError();
    if (err && err->code == VIR_ERR_OK) {
        virFreeError(err);
        err = NULL;
    }
    virStreamAbort(data->st);
    if (err) {
        virSetError(err);
        virFreeError(err);
    }

 error:
    virCopyLastError(&data->err);
    virResetLastError();
    VIR_FREE(buffer);
}


static qemuMigrationIOThreadPtr
qemuMigrationStartTunnel(virStreamPtr st,
                         int sock)
{
    qemuMigrationIOThreadPtr io = NULL;
    int wakeupFD[2] = { -1, -1 };

    if (pipe2(wakeupFD, O_CLOEXEC) < 0) {
        virReportSystemError(errno, "%s",
                             _("Unable to make pipe"));
        goto error;
    }

    if (VIR_ALLOC(io) < 0)
        goto error;

    io->st = st;
    io->sock = sock;
    io->wakeupRecvFD = wakeupFD[0];
    io->wakeupSendFD = wakeupFD[1];

    if (virThreadCreate(&io->thread, true,
                        qemuMigrationIOFunc,
                        io) < 0) {
        virReportSystemError(errno, "%s",
                             _("Unable to create migration thread"));
        goto error;
    }

    return io;

 error:
    VIR_FORCE_CLOSE(wakeupFD[0]);
    VIR_FORCE_CLOSE(wakeupFD[1]);
    VIR_FREE(io);
    return NULL;
}

static int
qemuMigrationStopTunnel(qemuMigrationIOThreadPtr io, bool error)
{
    int rv = -1;
    char stop = error ? 1 : 0;

    /* make sure the thread finishes its job and is joinable */
    if (safewrite(io->wakeupSendFD, &stop, 1) != 1) {
        virReportSystemError(errno, "%s",
                             _("failed to wakeup migration tunnel"));
        goto cleanup;
    }

    virThreadJoin(&io->thread);

    /* Forward error from the IO thread, to this thread */
    if (io->err.code != VIR_ERR_OK) {
        if (error)
            rv = 0;
        else
            virSetError(&io->err);
        virResetError(&io->err);
        goto cleanup;
    }

    rv = 0;

 cleanup:
    VIR_FORCE_CLOSE(io->wakeupSendFD);
    VIR_FORCE_CLOSE(io->wakeupRecvFD);
    VIR_FREE(io);
    return rv;
}
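/* The two helpers above are always used as a pair by qemuMigrationRun():
 * qemuMigrationStartTunnel() gets the local end of the pipe or the
 * accepted unix-socket connection once qemu starts writing, and
 * qemuMigrationStopTunnel() is called from the cleanup path with
 * error=true when the migration already failed, so a stream error at
 * that point is not reported a second time. */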

static int
qemuMigrationConnect(virQEMUDriverPtr driver,
                     virDomainObjPtr vm,
                     qemuMigrationSpecPtr spec)
{
    virNetSocketPtr sock;
    const char *host;
    char *port = NULL;
    int ret = -1;

    host = spec->dest.host.name;
    if (virAsprintf(&port, "%d", spec->dest.host.port) < 0)
        return -1;

    spec->destType = MIGRATION_DEST_FD;
    spec->dest.fd.qemu = -1;

    if (virSecurityManagerSetSocketLabel(driver->securityManager, vm->def) < 0)
        goto cleanup;
    if (virNetSocketNewConnectTCP(host, port, &sock) == 0) {
        spec->dest.fd.qemu = virNetSocketDupFD(sock, true);
        virObjectUnref(sock);
    }
    if (virSecurityManagerClearSocketLabel(driver->securityManager, vm->def) < 0 ||
        spec->dest.fd.qemu == -1)
        goto cleanup;

    /* Migration expects a blocking FD */
    if (virSetBlocking(spec->dest.fd.qemu, true) < 0) {
        virReportSystemError(errno, _("Unable to set FD %d blocking"),
                             spec->dest.fd.qemu);
        goto cleanup;
    }

    ret = 0;

 cleanup:
    VIR_FREE(port);
    if (ret < 0)
        VIR_FORCE_CLOSE(spec->dest.fd.qemu);
    return ret;
}
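/* qemuMigrationRun() below drives the Perform step on the source side: it
 * eats the incoming cookie and bakes the outgoing one, optionally starts
 * the NBD drive mirror, pauses the guest for non-live migration, issues
 * the monitor "migrate" command matching the destination type, and waits
 * for completion before returning to the caller. */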

static int
qemuMigrationRun(virQEMUDriverPtr driver,
                 virDomainObjPtr vm,
                 const char *cookiein,
                 int cookieinlen,
                 char **cookieout,
                 int *cookieoutlen,
                 unsigned long flags,
                 unsigned long resource,
                 qemuMigrationSpecPtr spec,
                 virConnectPtr dconn,
                 const char *graphicsuri)
{
    int ret = -1;
    unsigned int migrate_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuMigrationCookiePtr mig = NULL;
    qemuMigrationIOThreadPtr iothread = NULL;
    int fd = -1;
    unsigned long migrate_speed = resource ? resource : priv->migMaxBandwidth;
    virErrorPtr orig_err = NULL;
    unsigned int cookieFlags = 0;
    bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);
    int rc;

    VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=%lx, resource=%lu, "
              "spec=%p (dest=%d, fwd=%d), dconn=%p, graphicsuri=%s",
              driver, vm, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, resource,
              spec, spec->destType, spec->fwdType, dconn,
              NULLSTR(graphicsuri));

    if (flags & VIR_MIGRATE_NON_SHARED_DISK) {
        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (flags & VIR_MIGRATE_NON_SHARED_INC) {
        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (virLockManagerPluginUsesState(driver->lockManager) &&
        !cookieout) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Migration with lock driver %s requires"
                         " cookie support"),
                       virLockManagerPluginGetName(driver->lockManager));
        return -1;
    }

    mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
                                 cookieFlags | QEMU_MIGRATION_COOKIE_GRAPHICS);
    if (!mig)
        goto cleanup;

    if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig, graphicsuri) < 0)
        VIR_WARN("unable to provide data for graphics client relocation");

    /* this will update migrate_flags on success */
    if (qemuMigrationDriveMirror(driver, vm, mig, spec->dest.host.name,
                                 migrate_speed, &migrate_flags) < 0) {
        /* error reported by helper func */
        goto cleanup;
    }

    /* Before EnterMonitor, since qemuMigrationSetOffline already does that */
    if (!(flags & VIR_MIGRATE_LIVE) &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        if (qemuMigrationSetOffline(driver, vm) < 0)
            goto cleanup;
    }

    if (flags & VIR_MIGRATE_COMPRESSED &&
        qemuMigrationSetCompression(driver, vm,
                                    QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
        goto cleanup;

    if (flags & VIR_MIGRATE_AUTO_CONVERGE &&
        qemuMigrationSetAutoConverge(driver, vm,
                                     QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
        goto cleanup;

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
        goto cleanup;

    if (priv->job.asyncAbort) {
        /* explicitly do this *after* we entered the monitor,
         * as this is a critical section so we are guaranteed
         * priv->job.asyncAbort will not change */
        qemuDomainObjExitMonitor(driver, vm);
        virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                       qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                       _("canceled by client"));
        goto cleanup;
    }

    if (qemuMonitorSetMigrationSpeed(priv->mon, migrate_speed) < 0) {
        qemuDomainObjExitMonitor(driver, vm);
        goto cleanup;
    }

    /* connect to the destination qemu if needed */
    if (spec->destType == MIGRATION_DEST_CONNECT_HOST &&
        qemuMigrationConnect(driver, vm, spec) < 0) {
        qemuDomainObjExitMonitor(driver, vm);
        goto cleanup;
    }

    switch (spec->destType) {
    case MIGRATION_DEST_HOST:
        ret = qemuMonitorMigrateToHost(priv->mon, migrate_flags,
                                       spec->dest.host.name,
                                       spec->dest.host.port);
        break;

    case MIGRATION_DEST_CONNECT_HOST:
        /* handled above and transformed into MIGRATION_DEST_FD */
        break;

    case MIGRATION_DEST_UNIX:
        if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX)) {
            ret = qemuMonitorMigrateToUnix(priv->mon, migrate_flags,
                                           spec->dest.unix_socket.file);
        } else {
            const char *args[] = {
                "nc", "-U", spec->dest.unix_socket.file, NULL
            };
            ret = qemuMonitorMigrateToCommand(priv->mon, migrate_flags, args);
        }
        break;

    case MIGRATION_DEST_FD:
        if (spec->fwdType != MIGRATION_FWD_DIRECT) {
            fd = spec->dest.fd.local;
            spec->dest.fd.local = -1;
        }
        ret = qemuMonitorMigrateToFd(priv->mon, migrate_flags,
                                     spec->dest.fd.qemu);
        VIR_FORCE_CLOSE(spec->dest.fd.qemu);
        break;
    }
    qemuDomainObjExitMonitor(driver, vm);
    if (ret < 0)
        goto cleanup;
    ret = -1;

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        goto cleanup;
    }

    /* From this point onwards we *must* call cancel to abort the
     * migration on source if anything goes wrong */

    if (spec->destType == MIGRATION_DEST_UNIX) {
        /* It is also possible that the migrate didn't fail initially, but
         * rather failed later on.  Check its status before waiting for a
         * connection from qemu which may never be initiated.
         */
        if (qemuMigrationUpdateJobStatus(driver, vm, _("migration job"),
                                         QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            goto cancel;

        while ((fd = accept(spec->dest.unix_socket.sock, NULL, NULL)) < 0) {
            if (errno == EAGAIN || errno == EINTR)
                continue;
            virReportSystemError(errno, "%s",
                                 _("failed to accept connection from qemu"));
            goto cancel;
        }
    }

    if (spec->fwdType != MIGRATION_FWD_DIRECT &&
        !(iothread = qemuMigrationStartTunnel(spec->fwd.stream, fd)))
        goto cancel;

    rc = qemuMigrationWaitForCompletion(driver, vm,
                                        QEMU_ASYNC_JOB_MIGRATION_OUT,
                                        dconn, abort_on_error);
    if (rc == -2)
        goto cancel;
    else if (rc == -1)
        goto cleanup;

    /* When migration completed, QEMU will have paused the
     * CPUs for us, but unless we're using the JSON monitor
     * we won't have been notified of this, so might still
     * think we're running. For v2 protocol this doesn't
     * matter because we'll kill the VM soon, but for v3
     * this is important because we stay paused until the
     * confirm3 step, but need to release the lock state
     */
    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        if (qemuMigrationSetOffline(driver, vm) < 0)
            goto cleanup;
    }

    ret = 0;

 cleanup:
    if (ret < 0 && !orig_err)
        orig_err = virSaveLastError();

    /* cancel any outstanding NBD jobs */
    if (mig)
        qemuMigrationCancelDriveMirror(mig, driver, vm);

    if (spec->fwdType != MIGRATION_FWD_DIRECT) {
        if (iothread && qemuMigrationStopTunnel(iothread, ret < 0) < 0)
            ret = -1;
        VIR_FORCE_CLOSE(fd);
    }

    if (priv->job.completed) {
        qemuDomainJobInfoUpdateTime(priv->job.completed);
        qemuDomainJobInfoUpdateDowntime(priv->job.completed);
    }

    cookieFlags |= QEMU_MIGRATION_COOKIE_NETWORK |
                   QEMU_MIGRATION_COOKIE_STATS;
    if (flags & VIR_MIGRATE_PERSIST_DEST)
        cookieFlags |= QEMU_MIGRATION_COOKIE_PERSISTENT;
    if (ret == 0 &&
        qemuMigrationBakeCookie(mig, driver, vm, cookieout,
                                cookieoutlen, cookieFlags) < 0) {
        VIR_WARN("Unable to encode migration cookie");
    }

    qemuMigrationCookieFree(mig);

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

    return ret;

 cancel:
    orig_err = virSaveLastError();

    if (virDomainObjIsActive(vm)) {
        if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
            qemuMonitorMigrateCancel(priv->mon);
            qemuDomainObjExitMonitor(driver, vm);
        }
    }
    goto cleanup;
}

/* Perform migration using QEMU's native TCP migrate support,
 * not encrypted obviously
 */
static int doNativeMigrate(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           const char *uri,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           unsigned long flags,
                           unsigned long resource,
                           virConnectPtr dconn,
                           const char *graphicsuri)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virURIPtr uribits = NULL;
    int ret = -1;
    qemuMigrationSpec spec;

    VIR_DEBUG("driver=%p, vm=%p, uri=%s, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=%lx, resource=%lu, "
              "graphicsuri=%s",
              driver, vm, uri, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, resource,
              NULLSTR(graphicsuri));

    if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://")) {
        char *tmp;
        /* HACK: source host generates bogus URIs, so fix them up */
        if (virAsprintf(&tmp, "tcp://%s", uri + strlen("tcp:")) < 0)
            return -1;
        uribits = virURIParse(tmp);
        VIR_FREE(tmp);
    } else {
        uribits = virURIParse(uri);
    }
    if (!uribits)
        return -1;

    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD))
        spec.destType = MIGRATION_DEST_CONNECT_HOST;
    else
        spec.destType = MIGRATION_DEST_HOST;
    spec.dest.host.name = uribits->server;
    spec.dest.host.port = uribits->port;
    spec.fwdType = MIGRATION_FWD_DIRECT;

    ret = qemuMigrationRun(driver, vm, cookiein, cookieinlen, cookieout,
                           cookieoutlen, flags, resource, &spec, dconn,
                           graphicsuri);

    if (spec.destType == MIGRATION_DEST_FD)
        VIR_FORCE_CLOSE(spec.dest.fd.qemu);

    virURIFree(uribits);

    return ret;
}
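/* doNativeMigrate() above covers direct tcp: migration; doTunnelMigrate()
 * below is the VIR_MIGRATE_TUNNELLED variant, where the data is carried
 * over the libvirtd RPC stream instead of a direct qemu-to-qemu
 * connection. */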


static int doTunnelMigrate(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           virStreamPtr st,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           unsigned long flags,
                           unsigned long resource,
                           virConnectPtr dconn,
                           const char *graphicsuri)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virNetSocketPtr sock = NULL;
    int ret = -1;
    qemuMigrationSpec spec;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

    VIR_DEBUG("driver=%p, vm=%p, st=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=%lx, resource=%lu, "
              "graphicsuri=%s",
              driver, vm, st, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, resource,
              NULLSTR(graphicsuri));

    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD) &&
        !virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX) &&
        !virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) {
        virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                       _("Source qemu is too old to support tunnelled migration"));
        virObjectUnref(cfg);
        return -1;
    }

    spec.fwdType = MIGRATION_FWD_STREAM;
    spec.fwd.stream = st;

    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD)) {
        int fds[2];

        spec.destType = MIGRATION_DEST_FD;
        spec.dest.fd.qemu = -1;
        spec.dest.fd.local = -1;

        if (pipe2(fds, O_CLOEXEC) == 0) {
            spec.dest.fd.qemu = fds[1];
            spec.dest.fd.local = fds[0];
        }
        if (spec.dest.fd.qemu == -1 ||
            virSecurityManagerSetImageFDLabel(driver->securityManager, vm->def,
                                              spec.dest.fd.qemu) < 0) {
            virReportSystemError(errno, "%s",
                        _("cannot create pipe for tunnelled migration"));
            goto cleanup;
        }
    } else {
        spec.destType = MIGRATION_DEST_UNIX;
        spec.dest.unix_socket.sock = -1;
        spec.dest.unix_socket.file = NULL;

        if (virAsprintf(&spec.dest.unix_socket.file,
                        "%s/qemu.tunnelmigrate.src.%s",
                        cfg->libDir, vm->def->name) < 0)
            goto cleanup;

        if (virNetSocketNewListenUNIX(spec.dest.unix_socket.file, 0700,
                                      cfg->user, cfg->group,
                                      &sock) < 0 ||
            virNetSocketListen(sock, 1) < 0)
            goto cleanup;

        spec.dest.unix_socket.sock = virNetSocketGetFD(sock);
    }

    ret = qemuMigrationRun(driver, vm, cookiein, cookieinlen, cookieout,
                           cookieoutlen, flags, resource, &spec, dconn,
                           graphicsuri);

 cleanup:
    if (spec.destType == MIGRATION_DEST_FD) {
        VIR_FORCE_CLOSE(spec.dest.fd.qemu);
        VIR_FORCE_CLOSE(spec.dest.fd.local);
    } else {
        virObjectUnref(sock);
        VIR_FREE(spec.dest.unix_socket.file);
    }

    virObjectUnref(cfg);
    return ret;
}
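/* Note the asymmetry between the two tunnelled setups above: with
 * QEMU_CAPS_MIGRATE_QEMU_FD the data path is a plain pipe handed to qemu,
 * while the older fallback listens on a unix socket under cfg->libDir and
 * qemuMigrationRun() accept()s the connection before starting the IO
 * thread.  Either way the local end feeds the virStreamPtr supplied by
 * the caller. */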


/* This is essentially a re-impl of virDomainMigrateVersion2
 * from libvirt.c, but running in source libvirtd context,
 * instead of client app context & also adding in tunnel
 * handling */
static int doPeer2PeerMigrate2(virQEMUDriverPtr driver,
                               virConnectPtr sconn ATTRIBUTE_UNUSED,
                               virConnectPtr dconn,
                               virDomainObjPtr vm,
                               const char *dconnuri,
                               unsigned long flags,
                               const char *dname,
                               unsigned long resource)
{
    virDomainPtr ddomain = NULL;
    char *uri_out = NULL;
    char *cookie = NULL;
    char *dom_xml = NULL;
    int cookielen = 0, ret;
    virErrorPtr orig_err = NULL;
    bool cancelled;
    virStreamPtr st = NULL;
    unsigned long destflags;

    VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, vm=%p, dconnuri=%s, "
              "flags=%lx, dname=%s, resource=%lu",
              driver, sconn, dconn, vm, NULLSTR(dconnuri),
              flags, NULLSTR(dname), resource);

    /* In version 2 of the protocol, the prepare step is slightly
     * different.  We fetch the domain XML of the source domain
     * and pass it to Prepare2.
     */
    if (!(dom_xml = qemuDomainFormatXML(driver, vm,
                                        QEMU_DOMAIN_FORMAT_LIVE_FLAGS |
                                        VIR_DOMAIN_XML_MIGRATABLE)))
        return -1;

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
        flags |= VIR_MIGRATE_PAUSED;

    destflags = flags & ~(VIR_MIGRATE_ABORT_ON_ERROR |
                          VIR_MIGRATE_AUTO_CONVERGE);

    VIR_DEBUG("Prepare2 %p", dconn);
    if (flags & VIR_MIGRATE_TUNNELLED) {
        /*
         * Tunnelled Migrate Version 2 does not support cookies
         * due to missing parameters in the prepareTunnel() API.
         */

        if (!(st = virStreamNew(dconn, 0)))
            goto cleanup;

        qemuDomainObjEnterRemote(vm);
        ret = dconn->driver->domainMigratePrepareTunnel
            (dconn, st, destflags, dname, resource, dom_xml);
        qemuDomainObjExitRemote(vm);
    } else {
        qemuDomainObjEnterRemote(vm);
        ret = dconn->driver->domainMigratePrepare2
            (dconn, &cookie, &cookielen, NULL, &uri_out,
             destflags, dname, resource, dom_xml);
        qemuDomainObjExitRemote(vm);
    }
    VIR_FREE(dom_xml);
    if (ret == -1)
        goto cleanup;

    /* the domain may have shutdown or crashed while we had the locks dropped
     * in qemuDomainObjEnterRemote, so check again
     */
    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        goto cleanup;
    }

    if (!(flags & VIR_MIGRATE_TUNNELLED) &&
        (uri_out == NULL)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domainMigratePrepare2 did not set uri"));
        cancelled = true;
        orig_err = virSaveLastError();
        goto finish;
    }

    /* Perform the migration.  The driver isn't supposed to return
     * until the migration is complete.
     */
    VIR_DEBUG("Perform %p", sconn);
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
    if (flags & VIR_MIGRATE_TUNNELLED)
        ret = doTunnelMigrate(driver, vm, st,
                              NULL, 0, NULL, NULL,
                              flags, resource, dconn, NULL);
    else
        ret = doNativeMigrate(driver, vm, uri_out,
                              cookie, cookielen,
                              NULL, NULL, /* No out cookie with v2 migration */
                              flags, resource, dconn, NULL);

    /* Perform failed. Make sure Finish doesn't overwrite the error */
    if (ret < 0)
        orig_err = virSaveLastError();

    /* If Perform returns < 0, then we need to cancel the VM
     * startup on the destination
     */
    cancelled = ret < 0;

 finish:
    /* In version 2 of the migration protocol, we pass the
     * status code from the sender to the destination host,
     * so it can do any cleanup if the migration failed.
     */
    dname = dname ? dname : vm->def->name;
    VIR_DEBUG("Finish2 %p ret=%d", dconn, ret);
    qemuDomainObjEnterRemote(vm);
    ddomain = dconn->driver->domainMigrateFinish2
        (dconn, dname, cookie, cookielen,
         uri_out ? uri_out : dconnuri, destflags, cancelled);
    qemuDomainObjExitRemote(vm);
    if (cancelled && ddomain)
        VIR_ERROR(_("finish step ignored that migration was cancelled"));

 cleanup:
    if (ddomain) {
        virObjectUnref(ddomain);
        ret = 0;
    } else {
        ret = -1;
    }

    virObjectUnref(st);

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    VIR_FREE(uri_out);
    VIR_FREE(cookie);

    return ret;
}


/* This is essentially a re-impl of virDomainMigrateVersion3
 * from libvirt.c, but running in source libvirtd context,
 * instead of client app context & also adding in tunnel
 * handling */
static int
doPeer2PeerMigrate3(virQEMUDriverPtr driver,
                    virConnectPtr sconn,
                    virConnectPtr dconn,
                    const char *dconnuri,
                    virDomainObjPtr vm,
                    const char *xmlin,
                    const char *dname,
                    const char *uri,
                    const char *graphicsuri,
                    const char *listenAddress,
                    unsigned long long bandwidth,
                    bool useParams,
                    unsigned long flags)
{
    virDomainPtr ddomain = NULL;
    char *uri_out = NULL;
    char *cookiein = NULL;
    char *cookieout = NULL;
    char *dom_xml = NULL;
    int cookieinlen = 0;
    int cookieoutlen = 0;
    int ret = -1;
    virErrorPtr orig_err = NULL;
    bool cancelled = true;
    virStreamPtr st = NULL;
    unsigned long destflags;
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int maxparams = 0;

    VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, dconnuri=%s, vm=%p, xmlin=%s, "
              "dname=%s, uri=%s, graphicsuri=%s, listenAddress=%s, "
              "bandwidth=%llu, useParams=%d, flags=%lx",
              driver, sconn, dconn, NULLSTR(dconnuri), vm, NULLSTR(xmlin),
              NULLSTR(dname), NULLSTR(uri), NULLSTR(graphicsuri),
              NULLSTR(listenAddress), bandwidth, useParams, flags);

    /* Unlike the virDomainMigrateVersion3 counterpart, we don't need
     * to worry about auto-setting the VIR_MIGRATE_CHANGE_PROTECTION
     * bit here, because we are already running inside the context of
     * a single job.  */

    dom_xml = qemuMigrationBeginPhase(driver, vm, xmlin, dname,
                                      &cookieout, &cookieoutlen, flags);
    if (!dom_xml)
        goto cleanup;

    if (useParams) {
        if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_DEST_XML, dom_xml) < 0)
            goto cleanup;

        if (dname &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_DEST_NAME, dname) < 0)
            goto cleanup;

        if (uri &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_URI, uri) < 0)
            goto cleanup;

        if (bandwidth &&
            virTypedParamsAddULLong(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_BANDWIDTH,
                                    bandwidth) < 0)
            goto cleanup;

        if (graphicsuri &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_GRAPHICS_URI,
                                    graphicsuri) < 0)
            goto cleanup;
        if (listenAddress &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_LISTEN_ADDRESS,
                                    listenAddress) < 0)
            goto cleanup;
    }

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
        flags |= VIR_MIGRATE_PAUSED;

    destflags = flags & ~(VIR_MIGRATE_ABORT_ON_ERROR |
                          VIR_MIGRATE_AUTO_CONVERGE);

    VIR_DEBUG("Prepare3 %p", dconn);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;
    if (flags & VIR_MIGRATE_TUNNELLED) {
        if (!(st = virStreamNew(dconn, 0)))
            goto cleanup;

        qemuDomainObjEnterRemote(vm);
        if (useParams) {
            ret = dconn->driver->domainMigratePrepareTunnel3Params
                (dconn, st, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, destflags);
        } else {
            ret = dconn->driver->domainMigratePrepareTunnel3
                (dconn, st, cookiein, cookieinlen, &cookieout, &cookieoutlen,
                 destflags, dname, bandwidth, dom_xml);
        }
        qemuDomainObjExitRemote(vm);
    } else {
        qemuDomainObjEnterRemote(vm);
        if (useParams) {
            ret = dconn->driver->domainMigratePrepare3Params
                (dconn, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, &uri_out, destflags);
        } else {
            ret = dconn->driver->domainMigratePrepare3
                (dconn, cookiein, cookieinlen, &cookieout, &cookieoutlen,
                 uri, &uri_out, destflags, dname, bandwidth, dom_xml);
        }
        qemuDomainObjExitRemote(vm);
    }
    VIR_FREE(dom_xml);
    if (ret == -1)
        goto cleanup;

    if (flags & VIR_MIGRATE_OFFLINE) {
        VIR_DEBUG("Offline migration, skipping Perform phase");
        VIR_FREE(cookieout);
        cookieoutlen = 0;
        cancelled = false;
        goto finish;
    }

    if (uri_out) {
        uri = uri_out;
        if (useParams &&
            virTypedParamsReplaceString(&params, &nparams,
                                        VIR_MIGRATE_PARAM_URI, uri_out) < 0) {
            orig_err = virSaveLastError();
            goto finish;
        }
    } else if (!uri && !(flags & VIR_MIGRATE_TUNNELLED)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domainMigratePrepare3 did not set uri"));
        orig_err = virSaveLastError();
        goto finish;
    }

    /* Perform the migration.  The driver isn't supposed to return
     * until the migration is complete. The src VM should remain
     * running, but in paused state until the destination can
     * confirm migration completion.
     */
    VIR_DEBUG("Perform3 %p uri=%s", sconn, NULLSTR(uri));
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
    VIR_FREE(cookiein);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;
    if (flags & VIR_MIGRATE_TUNNELLED) {
        ret = doTunnelMigrate(driver, vm, st,
                              cookiein, cookieinlen,
                              &cookieout, &cookieoutlen,
                              flags, bandwidth, dconn, graphicsuri);
    } else {
        ret = doNativeMigrate(driver, vm, uri,
                              cookiein, cookieinlen,
                              &cookieout, &cookieoutlen,
                              flags, bandwidth, dconn, graphicsuri);
    }

    /* Perform failed. Make sure Finish doesn't overwrite the error */
    if (ret < 0) {
        orig_err = virSaveLastError();
    } else {
        qemuMigrationJobSetPhase(driver, vm,
                                 QEMU_MIGRATION_PHASE_PERFORM3_DONE);
    }

    /* If Perform returns < 0, then we need to cancel the VM
     * startup on the destination
     */
    cancelled = ret < 0;

 finish:
    /*
     * The status code from the source is passed to the destination.
     * The dest can clean up if the source indicated it failed to
     * send all migration data. Returns NULL for ddomain if
     * the dest was unable to complete migration.
     */
    VIR_DEBUG("Finish3 %p ret=%d", dconn, ret);
    VIR_FREE(cookiein);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;

    if (useParams) {
        if (virTypedParamsGetString(params, nparams,
                                    VIR_MIGRATE_PARAM_DEST_NAME, NULL) <= 0 &&
            virTypedParamsReplaceString(&params, &nparams,
                                        VIR_MIGRATE_PARAM_DEST_NAME,
                                        vm->def->name) < 0) {
            ddomain = NULL;
        } else {
            qemuDomainObjEnterRemote(vm);
            ddomain = dconn->driver->domainMigrateFinish3Params
                (dconn, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, destflags, cancelled);
            qemuDomainObjExitRemote(vm);
        }
    } else {
        dname = dname ? dname : vm->def->name;
        qemuDomainObjEnterRemote(vm);
        ddomain = dconn->driver->domainMigrateFinish3
            (dconn, dname, cookiein, cookieinlen, &cookieout, &cookieoutlen,
             dconnuri, uri, destflags, cancelled);
        qemuDomainObjExitRemote(vm);
    }
    if (cancelled && ddomain)
        VIR_ERROR(_("finish step ignored that migration was cancelled"));

    /* If ddomain is NULL, then we were unable to start
     * the guest on the target, and must restart on the
     * source. There is a small chance that the ddomain
     * is NULL due to an RPC failure, in which case
     * ddomain could in fact be running on the dest.
     * The lock manager plugins should take care of
     * safety in this scenario.
     */
    cancelled = ddomain == NULL;

    /* If finish3 set an error, and we don't have an earlier
     * one we need to preserve it in case confirm3 overwrites
     */
    if (!orig_err)
        orig_err = virSaveLastError();

    /*
     * If cancelled, then src VM will be restarted, else
     * it will be killed
     */
    VIR_DEBUG("Confirm3 %p cancelled=%d vm=%p", sconn, cancelled, vm);
    VIR_FREE(cookiein);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;
    ret = qemuMigrationConfirmPhase(driver, sconn, vm,
                                    cookiein, cookieinlen,
                                    flags, cancelled);
    /* If Confirm3 returns -1, there's nothing more we can
     * do, but fortunately worst case is that there is a
     * domain left in 'paused' state on source.
     */
    if (ret < 0)
        VIR_WARN("Guest %s probably left in 'paused' state on source",
                 vm->def->name);

 cleanup:
    if (ddomain) {
        virObjectUnref(ddomain);
        ret = 0;
    } else {
        ret = -1;
    }

    virObjectUnref(st);

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    VIR_FREE(uri_out);
    VIR_FREE(cookiein);
    VIR_FREE(cookieout);
    virTypedParamsFree(params, nparams);
    return ret;
}
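/* Throughout doPeer2PeerMigrate3() the cookie produced by one phase is fed
 * as input to the next (Begin -> Prepare3 -> Perform3 -> Finish3 ->
 * Confirm3), which is why cookieout is moved into cookiein and reset
 * before every step above. */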


static int virConnectCredType[] = {
    VIR_CRED_AUTHNAME,
    VIR_CRED_PASSPHRASE,
};


static virConnectAuth virConnectAuthConfig = {
    .credtype = virConnectCredType,
    .ncredtype = ARRAY_CARDINALITY(virConnectCredType),
};


static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
                              virConnectPtr sconn,
                              virDomainObjPtr vm,
                              const char *xmlin,
                              const char *dconnuri,
                              const char *uri,
                              const char *graphicsuri,
                              const char *listenAddress,
                              unsigned long flags,
                              const char *dname,
                              unsigned long resource,
                              bool *v3proto)
{
    int ret = -1;
    virConnectPtr dconn = NULL;
    bool p2p;
    virErrorPtr orig_err = NULL;
    bool offline = false;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    bool useParams;

    VIR_DEBUG("driver=%p, sconn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
              "uri=%s, graphicsuri=%s, listenAddress=%s, flags=%lx, "
              "dname=%s, resource=%lu",
              driver, sconn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
              NULLSTR(uri), NULLSTR(graphicsuri), NULLSTR(listenAddress),
              flags, NULLSTR(dname), resource);

    /* the order of operations is important here; we make sure the
     * destination side is completely setup before we touch the source
     */

    qemuDomainObjEnterRemote(vm);
    dconn = virConnectOpenAuth(dconnuri, &virConnectAuthConfig, 0);
    qemuDomainObjExitRemote(vm);
    if (dconn == NULL) {
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("Failed to connect to remote libvirt URI %s: %s"),
                       dconnuri, virGetLastErrorMessage());
        virObjectUnref(cfg);
        return -1;
    }

    if (virConnectSetKeepAlive(dconn, cfg->keepAliveInterval,
                               cfg->keepAliveCount) < 0)
        goto cleanup;

    qemuDomainObjEnterRemote(vm);
    p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                   VIR_DRV_FEATURE_MIGRATION_P2P);
        /* v3proto reflects whether the caller used Perform3, but with
         * p2p migrate, regardless of whether Perform2 or Perform3
         * were used, we decide protocol based on what target supports
         */
    *v3proto = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                        VIR_DRV_FEATURE_MIGRATION_V3);
    useParams = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                         VIR_DRV_FEATURE_MIGRATION_PARAMS);
    if (flags & VIR_MIGRATE_OFFLINE)
        offline = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                           VIR_DRV_FEATURE_MIGRATION_OFFLINE);
    qemuDomainObjExitRemote(vm);

    if (!p2p) {
        virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                       _("Destination libvirt does not support peer-to-peer migration protocol"));
        goto cleanup;
    }

    /* Only xmlin, dname, uri, and bandwidth parameters can be used with
     * old-style APIs. */
    if (!useParams && graphicsuri) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("Migration APIs with extensible parameters are not "
                         "supported but extended parameters were passed"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_OFFLINE && !offline) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("offline migration is not supported by "
                         "the destination host"));
        goto cleanup;
    }

    /* domain may have been stopped while we were talking to remote daemon */
    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        goto cleanup;
    }

    /* Change protection is only required on the source side (us), and
     * only for v3 migration when begin and perform are separate jobs.
     * But peer-2-peer is already a single job, and we still want to
     * talk to older destinations that would reject the flag.
     * Therefore it is safe to clear the bit here.  */
    flags &= ~VIR_MIGRATE_CHANGE_PROTECTION;

    if (*v3proto) {
        ret = doPeer2PeerMigrate3(driver, sconn, dconn, dconnuri, vm, xmlin,
                                  dname, uri, graphicsuri, listenAddress,
                                  resource, useParams, flags);
    } else {
        ret = doPeer2PeerMigrate2(driver, sconn, dconn, vm,
                                  dconnuri, flags, dname, resource);
    }

 cleanup:
    orig_err = virSaveLastError();
    qemuDomainObjEnterRemote(vm);
    virObjectUnref(dconn);
    qemuDomainObjExitRemote(vm);
    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    virObjectUnref(cfg);
    return ret;
}


4382 4383 4384 4385 4386 4387
/*
 * This implements perform part of the migration protocol when migration job
 * does not need to be active across several APIs, i.e., peer2peer migration or
 * perform phase of v2 non-peer2peer migration.
 */
static int
4388
qemuMigrationPerformJob(virQEMUDriverPtr driver,
4389 4390 4391 4392 4393
                        virConnectPtr conn,
                        virDomainObjPtr vm,
                        const char *xmlin,
                        const char *dconnuri,
                        const char *uri,
4394
                        const char *graphicsuri,
4395
                        const char *listenAddress,
4396
                        const char *cookiein,
                        int cookieinlen,
                        char **cookieout,
                        int *cookieoutlen,
                        unsigned long flags,
                        const char *dname,
                        unsigned long resource,
                        bool v3proto)
{
    virObjectEventPtr event = NULL;
    int ret = -1;
    virErrorPtr orig_err = NULL;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);

    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
        goto cleanup;

    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("domain is not running"));
        goto endjob;
    }

    if (!qemuMigrationIsAllowed(driver, vm, NULL, true, abort_on_error))
        goto endjob;

    if (!(flags & VIR_MIGRATE_UNSAFE) && !qemuMigrationIsSafe(vm->def))
        goto endjob;

    qemuMigrationStoreDomainState(vm);

    if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
        ret = doPeer2PeerMigrate(driver, conn, vm, xmlin,
                                 dconnuri, uri, graphicsuri, listenAddress,
                                 flags, dname, resource, &v3proto);
    } else {
        qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
        ret = doNativeMigrate(driver, vm, uri, cookiein, cookieinlen,
                              cookieout, cookieoutlen,
                              flags, resource, NULL, NULL);
    }
    if (ret < 0)
        goto endjob;

    /*
     * In v3 protocol, the source VM is not killed off until the
     * confirm step.
     */
    if (!v3proto) {
        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
                        VIR_QEMU_PROCESS_STOP_MIGRATED);
        virDomainAuditStop(vm, "migrated");
        event = virDomainEventLifecycleNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
    }

 endjob:
    if (ret < 0)
        orig_err = virSaveLastError();

    if (qemuMigrationRestoreDomainState(conn, vm)) {
        event = virDomainEventLifecycleNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_RESUMED,
                                         VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
    }

    if (!qemuMigrationJobFinish(driver, vm)) {
        vm = NULL;
    } else if (!virDomainObjIsActive(vm) &&
               (!vm->persistent ||
                (ret == 0 && (flags & VIR_MIGRATE_UNDEFINE_SOURCE)))) {
        if (flags & VIR_MIGRATE_UNDEFINE_SOURCE)
            virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
        qemuDomainRemoveInactive(driver, vm);
        vm = NULL;
    }

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

 cleanup:
    if (vm)
        virObjectUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    virObjectUnref(cfg);
    return ret;
}

/*
 * This implements the perform phase of the v3 migration protocol.
 */
static int
qemuMigrationPerformPhase(virQEMUDriverPtr driver,
                          virConnectPtr conn,
                          virDomainObjPtr vm,
                          const char *uri,
                          const char *graphicsuri,
                          const char *cookiein,
                          int cookieinlen,
                          char **cookieout,
                          int *cookieoutlen,
                          unsigned long flags,
                          unsigned long resource)
{
    virObjectEventPtr event = NULL;
    int ret = -1;
    bool hasrefs;

    /* If we didn't start the job in the begin phase, start it now. */
    if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            goto cleanup;
    } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
        goto cleanup;
    }

    qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
    virCloseCallbacksUnset(driver->closeCallbacks, vm,
                           qemuMigrationCleanup);

    ret = doNativeMigrate(driver, vm, uri, cookiein, cookieinlen,
                          cookieout, cookieoutlen,
                          flags, resource, NULL, graphicsuri);

    if (ret < 0) {
        if (qemuMigrationRestoreDomainState(conn, vm)) {
            event = virDomainEventLifecycleNewFromObj(vm,
                                                      VIR_DOMAIN_EVENT_RESUMED,
                                                      VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
        }
        goto endjob;
    }

    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE);

    if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
                             qemuMigrationCleanup) < 0)
        goto endjob;

 endjob:
    if (ret < 0)
        hasrefs = qemuMigrationJobFinish(driver, vm);
    else
        hasrefs = qemuMigrationJobContinue(vm);
    if (!hasrefs) {
        vm = NULL;
    } else if (!virDomainObjIsActive(vm) && !vm->persistent) {
        qemuDomainRemoveInactive(driver, vm);
        vm = NULL;
    }

 cleanup:
    if (vm)
        virObjectUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    return ret;
}

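/* Entry point for the perform step on the source host: dispatches to
 * qemuMigrationPerformJob() for peer2peer, tunnelled and v2 migrations
 * (which cover the whole job in this call), or to
 * qemuMigrationPerformPhase() when only the perform phase of a v3
 * migration runs in this API call. */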
int
qemuMigrationPerform(virQEMUDriverPtr driver,
                     virConnectPtr conn,
                     virDomainObjPtr vm,
                     const char *xmlin,
                     const char *dconnuri,
                     const char *uri,
                     const char *graphicsuri,
                     const char *listenAddress,
                     const char *cookiein,
                     int cookieinlen,
                     char **cookieout,
                     int *cookieoutlen,
                     unsigned long flags,
                     const char *dname,
                     unsigned long resource,
                     bool v3proto)
{
    VIR_DEBUG("driver=%p, conn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
              "uri=%s, graphicsuri=%s, listenAddress=%s, "
              "cookiein=%s, cookieinlen=%d, cookieout=%p, cookieoutlen=%p, "
              "flags=%lx, dname=%s, resource=%lu, v3proto=%d",
              driver, conn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
              NULLSTR(uri), NULLSTR(graphicsuri), NULLSTR(listenAddress),
              NULLSTR(cookiein), cookieinlen, cookieout, cookieoutlen,
              flags, NULLSTR(dname), resource, v3proto);

    if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
        if (cookieinlen) {
            virReportError(VIR_ERR_OPERATION_INVALID,
                           "%s", _("received unexpected cookie with P2P migration"));
            return -1;
        }

        return qemuMigrationPerformJob(driver, conn, vm, xmlin, dconnuri, uri,
                                       graphicsuri, listenAddress,
                                       cookiein, cookieinlen,
                                       cookieout, cookieoutlen,
                                       flags, dname, resource, v3proto);
    } else {
        if (dconnuri) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("Unexpected dconnuri parameter with non-peer2peer migration"));
            return -1;
        }

        if (v3proto) {
            return qemuMigrationPerformPhase(driver, conn, vm, uri,
                                             graphicsuri,
                                             cookiein, cookieinlen,
                                             cookieout, cookieoutlen,
                                             flags, resource);
        } else {
            return qemuMigrationPerformJob(driver, conn, vm, xmlin, dconnuri,
                                           uri, graphicsuri, listenAddress,
                                           cookiein, cookieinlen,
                                           cookieout, cookieoutlen, flags,
                                           dname, resource, v3proto);
        }
    }
}

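/* After an incoming migration finishes, re-associate the virtual port
 * profile of every direct (macvtap) interface with this host and register
 * the corresponding macvlan callbacks.  On failure, roll back the
 * associations that already succeeded before returning -1. */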
static int
qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def)
{
    size_t i;
    int last_good_net = -1;
    virDomainNetDefPtr net;

    for (i = 0; i < def->nnets; i++) {
        net = def->nets[i];
        if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
            if (virNetDevVPortProfileAssociate(net->ifname,
                                               virDomainNetGetActualVirtPortProfile(net),
                                               &net->mac,
                                               virDomainNetGetActualDirectDev(net),
                                               -1,
                                               def->uuid,
                                               VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH,
                                               false) < 0) {
                virReportError(VIR_ERR_OPERATION_FAILED,
                               _("Port profile Associate failed for %s"),
                               net->ifname);
                goto err_exit;
            }
            VIR_DEBUG("Port profile Associate succeeded for %s", net->ifname);

            if (virNetDevMacVLanVPortProfileRegisterCallback(net->ifname, &net->mac,
                                                             virDomainNetGetActualDirectDev(net), def->uuid,
                                                             virDomainNetGetActualVirtPortProfile(net),
                                                             VIR_NETDEV_VPORT_PROFILE_OP_CREATE))
                goto err_exit;
        }
        last_good_net = i;
    }

    return 0;

 err_exit:
    for (i = 0; last_good_net != -1 && i < last_good_net; i++) {
        net = def->nets[i];
        if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
            ignore_value(virNetDevVPortProfileDisassociate(net->ifname,
                                                           virDomainNetGetActualVirtPortProfile(net),
                                                           &net->mac,
                                                           virDomainNetGetActualDirectDev(net),
                                                           -1,
                                                           VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH));
        }
    }
    return -1;
}


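/* Finish phase on the destination host: check how the migration went
 * (retcode == 0 means success), associate port profiles, optionally
 * persist the domain, resume vCPUs unless VIR_MIGRATE_PAUSED was
 * requested, and return a virDomainPtr for the incoming domain, or tear
 * the leftover qemu process down again on failure. */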
virDomainPtr
qemuMigrationFinish(virQEMUDriverPtr driver,
                    virConnectPtr dconn,
                    virDomainObjPtr vm,
                    const char *cookiein,
                    int cookieinlen,
                    char **cookieout,
                    int *cookieoutlen,
                    unsigned long flags,
                    int retcode,
                    bool v3proto)
{
    virDomainPtr dom = NULL;
    virObjectEventPtr event = NULL;
    bool newVM = true;
    qemuMigrationCookiePtr mig = NULL;
    virErrorPtr orig_err = NULL;
    int cookie_flags = 0;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    virCapsPtr caps = NULL;
    unsigned short port;

    VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=%lx, retcode=%d",
              driver, dconn, vm, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, retcode);

    port = priv->migrationPort;
    priv->migrationPort = 0;

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN))
        goto cleanup;

    qemuMigrationJobStartPhase(driver, vm,
                               v3proto ? QEMU_MIGRATION_PHASE_FINISH3
                                       : QEMU_MIGRATION_PHASE_FINISH2);

    qemuDomainCleanupRemove(vm, qemuMigrationPrepareCleanup);
    VIR_FREE(priv->job.completed);

    cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK |
                   QEMU_MIGRATION_COOKIE_STATS;
    if (flags & VIR_MIGRATE_PERSIST_DEST)
        cookie_flags |= QEMU_MIGRATION_COOKIE_PERSISTENT;

    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein,
                                       cookieinlen, cookie_flags)))
        goto endjob;

    /* Did the migration go as planned?  If yes, return the domain
     * object, but if no, clean up the empty qemu process.
     */
    if (retcode == 0) {
        if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("guest unexpectedly quit"));
            goto endjob;
        }

        if (mig->jobInfo) {
            priv->job.completed = mig->jobInfo;
            mig->jobInfo = NULL;
        }

        if (!(flags & VIR_MIGRATE_OFFLINE)) {
            if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                                VIR_QEMU_PROCESS_STOP_MIGRATED);
                virDomainAuditStop(vm, "failed");
                event = virDomainEventLifecycleNewFromObj(vm,
                                                 VIR_DOMAIN_EVENT_STOPPED,
                                                 VIR_DOMAIN_EVENT_STOPPED_FAILED);
                goto endjob;
            }
            if (mig->network)
                if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
                    VIR_WARN("unable to provide network data for relocation");
        }

        qemuMigrationStopNBDServer(driver, vm, mig);

        if (flags & VIR_MIGRATE_PERSIST_DEST) {
            virDomainDefPtr vmdef;
            if (vm->persistent)
                newVM = false;
            vm->persistent = 1;
            if (mig->persistent)
                vm->newDef = vmdef = mig->persistent;
            else
                vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm);
            if (!vmdef || virDomainSaveConfig(cfg->configDir, vmdef) < 0) {
                /* Hmpf.  Migration was successful, but making it persistent
                 * was not.  If we report successful, then when this domain
                 * shuts down, management tools are in for a surprise.  On the
                 * other hand, if we report failure, then the management tools
                 * might try to restart the domain on the source side, even
                 * though the domain is actually running on the destination.
                 * Return a NULL dom pointer, and hope that this is a rare
                 * situation and management tools are smart.
                 */

                /*
                 * However, in v3 protocol, the source VM is still available
                 * to restart during confirm() step, so we kill it off now.
                 */
                if (v3proto) {
                    if (!(flags & VIR_MIGRATE_OFFLINE)) {
                        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                                        VIR_QEMU_PROCESS_STOP_MIGRATED);
                        virDomainAuditStop(vm, "failed");
                    }
                    if (newVM)
                        vm->persistent = 0;
                }
                if (!vmdef)
                    virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                                   _("can't get vmdef"));
                goto endjob;
            }

            event = virDomainEventLifecycleNewFromObj(vm,
                                             VIR_DOMAIN_EVENT_DEFINED,
                                             newVM ?
                                             VIR_DOMAIN_EVENT_DEFINED_ADDED :
                                             VIR_DOMAIN_EVENT_DEFINED_UPDATED);
            if (event)
                qemuDomainEventQueue(driver, event);
            event = NULL;
        }

        if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) {
            /* run 'cont' on the destination, which allows migration on qemu
             * >= 0.10.6 to work properly.  This isn't strictly necessary on
             * older qemu's, but it also doesn't hurt anything there
             */
            if (qemuProcessStartCPUs(driver, vm, dconn,
                                     VIR_DOMAIN_RUNNING_MIGRATED,
                                     QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
                if (virGetLastError() == NULL)
                    virReportError(VIR_ERR_INTERNAL_ERROR,
                                   "%s", _("resume operation failed"));
                /* Need to save the current error, in case shutting
                 * down the process overwrites it
                 */
                orig_err = virSaveLastError();

                /*
                 * In v3 protocol, the source VM is still available to
                 * restart during confirm() step, so we kill it off
                 * now.
                 * In v2 protocol, the source is dead, so we leave
                 * target in paused state, in case admin can fix
                 * things up
                 */
                if (v3proto) {
                    qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                                    VIR_QEMU_PROCESS_STOP_MIGRATED);
                    virDomainAuditStop(vm, "failed");
                    event = virDomainEventLifecycleNewFromObj(vm,
                                                     VIR_DOMAIN_EVENT_STOPPED,
                                                     VIR_DOMAIN_EVENT_STOPPED_FAILED);
                }
                goto endjob;
            }
            if (priv->job.completed) {
                qemuDomainJobInfoUpdateTime(priv->job.completed);
                qemuDomainJobInfoUpdateDowntime(priv->job.completed);
            }
        }

        dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);

        if (!(flags & VIR_MIGRATE_OFFLINE)) {
            event = virDomainEventLifecycleNewFromObj(vm,
                                             VIR_DOMAIN_EVENT_RESUMED,
                                             VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
            if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
                virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
                                     VIR_DOMAIN_PAUSED_USER);
                if (event)
                    qemuDomainEventQueue(driver, event);
                event = virDomainEventLifecycleNewFromObj(vm,
                                                 VIR_DOMAIN_EVENT_SUSPENDED,
                                                 VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
            }
        }

        if (virDomainObjIsActive(vm) &&
            virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) {
            VIR_WARN("Failed to save status on vm %s", vm->def->name);
            goto endjob;
        }

        /* Guest is successfully running, so cancel previous auto destroy */
        qemuProcessAutoDestroyRemove(driver, vm);
    } else if (!(flags & VIR_MIGRATE_OFFLINE)) {
        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                        VIR_QEMU_PROCESS_STOP_MIGRATED);
        virDomainAuditStop(vm, "failed");
        event = virDomainEventLifecycleNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_FAILED);
    }

    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
                                QEMU_MIGRATION_COOKIE_STATS) < 0)
        VIR_WARN("Unable to encode migration cookie");

 endjob:
    if (qemuMigrationJobFinish(driver, vm) == 0) {
        vm = NULL;
    } else if (!vm->persistent && !virDomainObjIsActive(vm)) {
        qemuDomainRemoveInactive(driver, vm);
        vm = NULL;
    }

 cleanup:
    virPortAllocatorRelease(driver->migrationPorts, port);
    if (vm) {
        if (priv->mon)
            qemuMonitorSetDomainLog(priv->mon, -1);
        VIR_FREE(priv->origname);
        virObjectUnlock(vm);
    }
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuMigrationCookieFree(mig);
    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    virObjectUnref(caps);
    virObjectUnref(cfg);
    return dom;
}


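/* Stream the domain's migration data into an already-open file descriptor,
 * optionally piping it through an external compressor program; this is
 * used by the save and core dump code paths rather than by live
 * migration itself. */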
/* Helper function called while vm is active.  */
int
qemuMigrationToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
                    int fd, off_t offset, const char *path,
                    const char *compressor,
                    bool bypassSecurityDriver,
                    qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int rc;
    int ret = -1;
    bool restoreLabel = false;
    virCommandPtr cmd = NULL;
    int pipeFD[2] = { -1, -1 };
    unsigned long saveMigBandwidth = priv->migMaxBandwidth;
    char *errbuf = NULL;
    virErrorPtr orig_err = NULL;

    /* Increase migration bandwidth to unlimited since target is a file.
     * Failure to change migration speed is not fatal. */
    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
        qemuMonitorSetMigrationSpeed(priv->mon,
                                     QEMU_DOMAIN_MIG_BANDWIDTH_MAX);
        priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
        qemuDomainObjExitMonitor(driver, vm);
    }

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        /* nothing to tear down */
        return -1;
    }

    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD) &&
        (!compressor || pipe(pipeFD) == 0)) {
        /* All right! We can use fd migration, which means that qemu
         * doesn't have to open() the file, so while we still have to
         * grant SELinux access, we can do it on fd and avoid cleanup
         * later, as well as skip futzing with cgroup.  */
        if (virSecurityManagerSetImageFDLabel(driver->securityManager, vm->def,
                                              compressor ? pipeFD[1] : fd) < 0)
            goto cleanup;
        bypassSecurityDriver = true;
    } else {
        /* Phooey - we have to fall back on exec migration, where qemu
         * has to popen() the file by name, and block devices have to be
         * given cgroup ACL permission.  We might also stumble on
         * a race present in some qemu versions where it does a wait()
         * that botches pclose.  */
        if (virCgroupHasController(priv->cgroup,
                                   VIR_CGROUP_CONTROLLER_DEVICES)) {
            int rv = virCgroupAllowDevicePath(priv->cgroup, path,
                                              VIR_CGROUP_DEVICE_RW);
            virDomainAuditCgroupPath(vm, priv->cgroup, "allow", path, "rw", rv == 0);
            if (rv == 1) {
                /* path was not a device, no further need for cgroup */
            } else if (rv < 0) {
                goto cleanup;
            }
        }
        if ((!bypassSecurityDriver) &&
            virSecurityManagerSetSavedStateLabel(driver->securityManager,
                                                 vm->def, path) < 0)
            goto cleanup;
        restoreLabel = true;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        goto cleanup;

    if (!compressor) {
        const char *args[] = { "cat", NULL };

        if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD) &&
            priv->monConfig->type == VIR_DOMAIN_CHR_TYPE_UNIX) {
            rc = qemuMonitorMigrateToFd(priv->mon,
                                        QEMU_MONITOR_MIGRATE_BACKGROUND,
                                        fd);
        } else {
            rc = qemuMonitorMigrateToFile(priv->mon,
                                          QEMU_MONITOR_MIGRATE_BACKGROUND,
                                          args, path, offset);
        }
    } else {
        const char *prog = compressor;
        const char *args[] = {
            prog,
            "-c",
            NULL
        };
        if (pipeFD[0] != -1) {
            cmd = virCommandNewArgs(args);
            virCommandSetInputFD(cmd, pipeFD[0]);
            virCommandSetOutputFD(cmd, &fd);
            virCommandSetErrorBuffer(cmd, &errbuf);
            virCommandDoAsyncIO(cmd);
            if (virSetCloseExec(pipeFD[1]) < 0) {
                virReportSystemError(errno, "%s",
                                     _("Unable to set cloexec flag"));
                qemuDomainObjExitMonitor(driver, vm);
                goto cleanup;
            }
            if (virCommandRunAsync(cmd, NULL) < 0) {
                qemuDomainObjExitMonitor(driver, vm);
                goto cleanup;
            }
            rc = qemuMonitorMigrateToFd(priv->mon,
                                        QEMU_MONITOR_MIGRATE_BACKGROUND,
                                        pipeFD[1]);
            if (VIR_CLOSE(pipeFD[0]) < 0 ||
                VIR_CLOSE(pipeFD[1]) < 0)
                VIR_WARN("failed to close intermediate pipe");
        } else {
            rc = qemuMonitorMigrateToFile(priv->mon,
                                          QEMU_MONITOR_MIGRATE_BACKGROUND,
                                          args, path, offset);
        }
    }
    qemuDomainObjExitMonitor(driver, vm);

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        goto cleanup;
    }

    if (rc < 0)
        goto cleanup;

    rc = qemuMigrationWaitForCompletion(driver, vm, asyncJob, NULL, false);

    if (rc < 0) {
        if (rc == -2) {
            orig_err = virSaveLastError();
            virCommandAbort(cmd);
            if (virDomainObjIsActive(vm) &&
                qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
                qemuMonitorMigrateCancel(priv->mon);
                qemuDomainObjExitMonitor(driver, vm);
            }
        }
        goto cleanup;
    }

    if (cmd && virCommandWait(cmd, NULL) < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    if (ret < 0 && !orig_err)
        orig_err = virSaveLastError();

    /* Restore max migration bandwidth */
    if (virDomainObjIsActive(vm) &&
        qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
        qemuMonitorSetMigrationSpeed(priv->mon, saveMigBandwidth);
        priv->migMaxBandwidth = saveMigBandwidth;
        qemuDomainObjExitMonitor(driver, vm);
    }

    VIR_FORCE_CLOSE(pipeFD[0]);
    VIR_FORCE_CLOSE(pipeFD[1]);
    if (cmd) {
        VIR_DEBUG("Compression binary stderr: %s", NULLSTR(errbuf));
        VIR_FREE(errbuf);
        virCommandFree(cmd);
    }
    if (restoreLabel && (!bypassSecurityDriver) &&
        virSecurityManagerRestoreSavedStateLabel(driver->securityManager,
                                                 vm->def, path) < 0)
        VIR_WARN("failed to restore save state label on %s", path);

    if (virCgroupHasController(priv->cgroup,
                               VIR_CGROUP_CONTROLLER_DEVICES)) {
        int rv = virCgroupDenyDevicePath(priv->cgroup, path,
                                         VIR_CGROUP_DEVICE_RWM);
        virDomainAuditCgroupPath(vm, priv->cgroup, "deny", path, "rwm", rv == 0);
    }

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

    return ret;
}

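/* Start the migration async job and restrict which regular jobs may run
 * concurrently with it: none at all for incoming migration, and only
 * suspend plus migration-control operations (on top of the default mask)
 * for outgoing migration. */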
int
qemuMigrationJobStart(virQEMUDriverPtr driver,
                      virDomainObjPtr vm,
                      qemuDomainAsyncJob job)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (qemuDomainObjBeginAsyncJob(driver, vm, job) < 0)
        return -1;

    if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
        qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
    } else {
        qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
                                          JOB_MASK(QEMU_JOB_SUSPEND) |
                                          JOB_MASK(QEMU_JOB_MIGRATION_OP)));
    }

    priv->job.current->type = VIR_DOMAIN_JOB_UNBOUNDED;

    return 0;
}

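/* Record the phase the migration protocol has reached.  Phases may only
 * move forward; a request to step backwards is logged and ignored. */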
void
qemuMigrationJobSetPhase(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         qemuMigrationJobPhase phase)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (phase < priv->job.phase) {
        VIR_ERROR(_("migration protocol going backwards %s => %s"),
                  qemuMigrationJobPhaseTypeToString(priv->job.phase),
                  qemuMigrationJobPhaseTypeToString(phase));
        return;
    }

    qemuDomainObjSetJobPhase(driver, vm, phase);
}

void
qemuMigrationJobStartPhase(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           qemuMigrationJobPhase phase)
{
    virObjectRef(vm);
    qemuMigrationJobSetPhase(driver, vm, phase);
}

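/* Keep the migration job alive across protocol phases: release this
 * thread's hold on the async job so a later phase can pick it up again,
 * and drop the reference taken by qemuMigrationJobStartPhase. */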
bool
qemuMigrationJobContinue(virDomainObjPtr vm)
{
    qemuDomainObjReleaseAsyncJob(vm);
    return virObjectUnref(vm);
}

bool
qemuMigrationJobIsActive(virDomainObjPtr vm,
                         qemuDomainAsyncJob job)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (priv->job.asyncJob != job) {
        const char *msg;

        if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
            msg = _("domain '%s' is not processing incoming migration");
        else
            msg = _("domain '%s' is not being migrated");

        virReportError(VIR_ERR_OPERATION_INVALID, msg, vm->def->name);
        return false;
    }
    return true;
}

bool
qemuMigrationJobFinish(virQEMUDriverPtr driver, virDomainObjPtr vm)
{
    return qemuDomainObjEndAsyncJob(driver, vm);
}