/*
 * qemu_migration.c: QEMU migration handling
 *
 * Copyright (C) 2006-2015 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 */

#include <config.h>

#include <netdb.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <fcntl.h>
#include <poll.h>

#include "qemu_migration.h"
#include "qemu_migration_cookie.h"
#include "qemu_migration_params.h"
#include "qemu_monitor.h"
#include "qemu_domain.h"
#include "qemu_process.h"
#include "qemu_capabilities.h"
#include "qemu_alias.h"
#include "qemu_cgroup.h"
#include "qemu_hotplug.h"
#include "qemu_blockjob.h"
#include "qemu_security.h"
#include "qemu_block.h"

#include "domain_audit.h"
#include "virlog.h"
#include "virerror.h"
#include "viralloc.h"
#include "virfile.h"
#include "virnetdevopenvswitch.h"
#include "datatypes.h"
#include "virfdstream.h"
#include "viruuid.h"
#include "virtime.h"
#include "locking/domain_lock.h"
#include "rpc/virnetsocket.h"
#include "virstoragefile.h"
#include "viruri.h"
#include "virhook.h"
#include "virstring.h"
#include "virtypedparam.h"
#include "virprocess.h"
#include "nwfilter_conf.h"
#include "virdomainsnapshotobjlist.h"

#define VIR_FROM_THIS VIR_FROM_QEMU

VIR_LOG_INIT("qemu.qemu_migration");

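/* Human-readable names for qemuMigrationJobPhase values; the *2 and *3
 * entries correspond to the phases of the v2 and v3 migration protocols. */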
VIR_ENUM_IMPL(qemuMigrationJobPhase, QEMU_MIGRATION_PHASE_LAST,
              "none",
              "perform2",
              "begin3",
              "perform3",
              "perform3_done",
              "confirm3_cancelled",
              "confirm3",
              "prepare",
              "finish2",
              "finish3",
);

static int
qemuMigrationJobStart(virQEMUDriverPtr driver,
                      virDomainObjPtr vm,
                      qemuDomainAsyncJob job,
                      unsigned long apiFlags)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;

static void
qemuMigrationJobSetPhase(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         qemuMigrationJobPhase phase)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);

static void
qemuMigrationJobStartPhase(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           qemuMigrationJobPhase phase)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);

static void
qemuMigrationJobContinue(virDomainObjPtr obj)
    ATTRIBUTE_NONNULL(1);

static bool
qemuMigrationJobIsActive(virDomainObjPtr vm,
                         qemuDomainAsyncJob job)
    ATTRIBUTE_NONNULL(1);

static void
qemuMigrationJobFinish(virQEMUDriverPtr driver,
                       virDomainObjPtr obj)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);

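/**
 * qemuMigrationSrcStoreDomainState:
 * @vm: domain object
 *
 * Remember the domain state before migration starts so that
 * qemuMigrationSrcRestoreDomainState() can restore it if the migration fails.
 */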
static void
qemuMigrationSrcStoreDomainState(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    priv->preMigrationState = virDomainObjGetState(vm, NULL);

    VIR_DEBUG("Storing pre-migration state=%d domain=%p",
              priv->preMigrationState, vm);
}

/* Returns true if the domain was resumed, false otherwise */
static bool
qemuMigrationSrcRestoreDomainState(virQEMUDriverPtr driver, virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int reason;
    virDomainState state = virDomainObjGetState(vm, &reason);
    bool ret = false;

    VIR_DEBUG("driver=%p, vm=%p, pre-mig-state=%s, state=%s, reason=%s",
              driver, vm,
              virDomainStateTypeToString(priv->preMigrationState),
              virDomainStateTypeToString(state),
              virDomainStateReasonToString(state, reason));

    if (state != VIR_DOMAIN_PAUSED ||
        reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED)
        goto cleanup;

    if (priv->preMigrationState == VIR_DOMAIN_RUNNING) {
        /* This is basically the only restore possibility that's
         * safe and that we should attempt */

        VIR_DEBUG("Restoring pre-migration state due to migration error");

        /* we got here through some sort of failure; start the domain again */
        if (qemuProcessStartCPUs(driver, vm,
                                 VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
                                 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
            /* Hm, we already know we are in error here.  We don't want to
             * overwrite the previous error, though, so we just throw something
             * to the logs and hope for the best */
            VIR_ERROR(_("Failed to resume guest %s after failure"), vm->def->name);
            goto cleanup;
        }
        ret = true;
    }

 cleanup:
    priv->preMigrationState = VIR_DOMAIN_NOSTATE;
    return ret;
}


static int
qemuMigrationDstPrecreateDisk(virConnectPtr conn,
                              virDomainDiskDefPtr disk,
                              unsigned long long capacity)
{
    int ret = -1;
    virStoragePoolPtr pool = NULL;
    virStorageVolPtr vol = NULL;
    char *volName = NULL, *basePath = NULL;
    char *volStr = NULL;
    virBuffer buf = VIR_BUFFER_INITIALIZER;
    const char *format = NULL;
    unsigned int flags = 0;

    VIR_DEBUG("Precreate disk type=%s", virStorageTypeToString(disk->src->type));

    switch ((virStorageType)disk->src->type) {
    case VIR_STORAGE_TYPE_FILE:
        if (!virDomainDiskGetSource(disk)) {
            VIR_DEBUG("Dropping sourceless disk '%s'",
                      disk->dst);
            return 0;
        }

        if (VIR_STRDUP(basePath, disk->src->path) < 0)
            goto cleanup;

        if (!(volName = strrchr(basePath, '/'))) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("malformed disk path: %s"),
                           disk->src->path);
            goto cleanup;
        }

        *volName = '\0';
        volName++;

        if (!(pool = virStoragePoolLookupByTargetPath(conn, basePath)))
            goto cleanup;
        format = virStorageFileFormatTypeToString(disk->src->format);
        if (disk->src->format == VIR_STORAGE_FILE_QCOW2)
            flags |= VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA;
        break;

    case VIR_STORAGE_TYPE_VOLUME:
        if (!(pool = virStoragePoolLookupByName(conn, disk->src->srcpool->pool)))
            goto cleanup;
        format = virStorageFileFormatTypeToString(disk->src->format);
        volName = disk->src->srcpool->volume;
        if (disk->src->format == VIR_STORAGE_FILE_QCOW2)
            flags |= VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA;
        break;

    case VIR_STORAGE_TYPE_NETWORK:
        VIR_DEBUG("Skipping creation of network disk '%s'",
                  disk->dst);
        return 0;

    case VIR_STORAGE_TYPE_BLOCK:
    case VIR_STORAGE_TYPE_DIR:
    case VIR_STORAGE_TYPE_NONE:
    case VIR_STORAGE_TYPE_LAST:
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("cannot precreate storage for disk type '%s'"),
                       virStorageTypeToString(disk->src->type));
        goto cleanup;
    }

    if ((vol = virStorageVolLookupByName(pool, volName))) {
        VIR_DEBUG("Skipping creation of already existing volume of name '%s'",
                  volName);
        ret = 0;
        goto cleanup;
    }

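    /* Build a minimal volume XML document for the missing target, e.g.
     * (illustrative values only):
     *   <volume>
     *     <name>guest-disk.qcow2</name>
     *     <capacity>10737418240</capacity>
     *     <target>
     *       <format type='qcow2'/>
     *     </target>
     *   </volume>
     */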
    virBufferAddLit(&buf, "<volume>\n");
    virBufferAdjustIndent(&buf, 2);
    virBufferEscapeString(&buf, "<name>%s</name>\n", volName);
    virBufferAsprintf(&buf, "<capacity>%llu</capacity>\n", capacity);
    virBufferAddLit(&buf, "<target>\n");
    virBufferAdjustIndent(&buf, 2);
    virBufferAsprintf(&buf, "<format type='%s'/>\n", format);
    virBufferAdjustIndent(&buf, -2);
    virBufferAddLit(&buf, "</target>\n");
    virBufferAdjustIndent(&buf, -2);
    virBufferAddLit(&buf, "</volume>\n");

    if (!(volStr = virBufferContentAndReset(&buf))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("unable to create volume XML"));
        goto cleanup;
    }

    if (!(vol = virStorageVolCreateXML(pool, volStr, flags)))
        goto cleanup;

    ret = 0;
 cleanup:
    VIR_FREE(basePath);
    VIR_FREE(volStr);
    virObjectUnref(vol);
    virObjectUnref(pool);
    return ret;
}

static bool
qemuMigrationAnyCopyDisk(virDomainDiskDef const *disk,
                         size_t nmigrate_disks, const char **migrate_disks)
{
    size_t i;

    /* Check if the disk alias is in the list */
    if (nmigrate_disks) {
        for (i = 0; i < nmigrate_disks; i++) {
            if (STREQ(disk->dst, migrate_disks[i]))
                return true;
        }
        return false;
    }

    /* Default is to migrate only non-shared non-readonly disks
     * with source */
    return !disk->src->shared && !disk->src->readonly &&
           !virStorageSourceIsEmpty(disk->src);
}


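/*
 * qemuMigrationDstPrecreateStorage:
 *
 * Pre-create volumes on the destination for any disk that is about to be
 * copied over NBD and does not exist there yet. Volumes are looked up and
 * created through the storage driver connection; pre-creation is rejected
 * for incremental storage migration.
 */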
static int
qemuMigrationDstPrecreateStorage(virDomainObjPtr vm,
                                 qemuMigrationCookieNBDPtr nbd,
                                 size_t nmigrate_disks,
                                 const char **migrate_disks,
                                 bool incremental)
{
    int ret = -1;
    size_t i = 0;
    virConnectPtr conn;

    if (!nbd || !nbd->ndisks)
        return 0;

    if (!(conn = virGetConnectStorage()))
        return -1;

    for (i = 0; i < nbd->ndisks; i++) {
        virDomainDiskDefPtr disk;
        const char *diskSrcPath;

        VIR_DEBUG("Looking up disk target '%s' (capacity=%llu)",
                  nbd->disks[i].target, nbd->disks[i].capacity);

        if (!(disk = virDomainDiskByName(vm->def, nbd->disks[i].target,
                                         false))) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("unable to find disk by target: %s"),
                           nbd->disks[i].target);
            goto cleanup;
        }

        diskSrcPath = virDomainDiskGetSource(disk);

        /* Skip disks we don't want to migrate and already existing disks. */
        if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks) ||
            (diskSrcPath && virFileExists(diskSrcPath))) {
            continue;
        }

        if (incremental) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("pre-creation of storage targets for incremental "
                             "storage migration is not supported"));
            goto cleanup;
        }

        VIR_DEBUG("Proceeding with disk source %s", NULLSTR(diskSrcPath));

        if (qemuMigrationDstPrecreateDisk(conn, disk, nbd->disks[i].capacity) < 0)
            goto cleanup;
    }

    ret = 0;
 cleanup:
    virObjectUnref(conn);
    return ret;
}


/**
 * qemuMigrationDstStartNBDServer:
 * @driver: qemu driver
 * @vm: domain
 * @listenAddr: address for the NBD server to listen on
 * @nmigrate_disks: number of entries in @migrate_disks
 * @migrate_disks: list of disk targets to migrate (NULL means all eligible)
 * @nbdPort: NBD port to use, 0 to allocate one automatically
 * @tls_alias: alias of the TLS object to use for the NBD server, or NULL
 *
 * Starts the NBD server. This is a newer method of copying storage during
 * migration than the 'blk' and 'inc' arguments of the 'migrate' monitor
 * command. Errors are reported here.
 *
 * Returns 0 on success, -1 otherwise.
 */
static int
qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               const char *listenAddr,
                               size_t nmigrate_disks,
                               const char **migrate_disks,
                               int nbdPort,
                               const char *tls_alias)
{
    int ret = -1;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned short port = 0;
    char *diskAlias = NULL;
    size_t i;

    if (nbdPort < 0 || nbdPort > USHRT_MAX) {
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("nbd port must be in range 0-65535"));
        return -1;
    }

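    /* Start the NBD server when the first disk that needs to be exported is
     * found (reusing @nbdPort if requested, otherwise allocating a port from
     * the migration port range) and add an export for every migrated disk. */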
    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];

        /* check whether disk should be migrated */
        if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
            continue;

        if (disk->src->readonly || virStorageSourceIsEmpty(disk->src)) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
                           _("Cannot migrate empty or read-only disk %s"),
                           disk->dst);
            goto cleanup;
        }

        VIR_FREE(diskAlias);
        if (!(diskAlias = qemuAliasDiskDriveFromDisk(disk)))
            goto cleanup;

        if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
            goto cleanup;

        if (port == 0) {
            if (nbdPort)
                port = nbdPort;
            else if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
                goto exit_monitor;

            if (qemuMonitorNBDServerStart(priv->mon, listenAddr, port, tls_alias) < 0)
                goto exit_monitor;
        }

        if (qemuMonitorNBDServerAdd(priv->mon, diskAlias, true) < 0)
            goto exit_monitor;
        if (qemuDomainObjExitMonitor(driver, vm) < 0)
            goto cleanup;
    }

    priv->nbdPort = port;
    ret = 0;

 cleanup:
    VIR_FREE(diskAlias);
    if (ret < 0 && nbdPort == 0)
        virPortAllocatorRelease(port);
    return ret;

 exit_monitor:
    ignore_value(qemuDomainObjExitMonitor(driver, vm));
    goto cleanup;
}


static int
qemuMigrationDstStopNBDServer(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              qemuMigrationCookiePtr mig)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (!mig->nbd)
        return 0;

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        return -1;

    if (qemuMonitorNBDServerStop(priv->mon) < 0)
        VIR_WARN("Unable to stop NBD server");
    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        return -1;

    virPortAllocatorRelease(priv->nbdPort);
    priv->nbdPort = 0;
    return 0;
}


static void
qemuMigrationNBDReportMirrorError(qemuBlockJobDataPtr job,
                                  const char *diskdst)
{
    if (job->errmsg) {
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("migration of disk %s failed: %s"),
                       diskdst, job->errmsg);
    } else {
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("migration of disk %s failed"), diskdst);
    }
}


/**
 * qemuMigrationSrcNBDStorageCopyReady:
 * @vm: domain
 *
 * Check the status of all drives copied via qemuMigrationSrcNBDStorageCopy.
 * Any pending block job events for the mirrored disks will be processed.
 *
 * Returns 1 if all mirrors are "ready",
 *         0 if some mirrors are still performing initial sync,
 *        -1 on error.
 */
static int
qemuMigrationSrcNBDStorageCopyReady(virDomainObjPtr vm,
                                    qemuDomainAsyncJob asyncJob)
{
    size_t i;
    size_t notReady = 0;
    int status;

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        qemuBlockJobDataPtr job;

        if (!diskPriv->migrating)
            continue;

        if (!(job = qemuBlockJobDiskGetJob(disk))) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("missing block job data for disk '%s'"), disk->dst);
            return -1;
        }

        status = qemuBlockJobUpdate(vm, job, asyncJob);
        if (status == VIR_DOMAIN_BLOCK_JOB_FAILED) {
            qemuMigrationNBDReportMirrorError(job, disk->dst);
            virObjectUnref(job);
            return -1;
        }

        virObjectUnref(job);

        if (disk->mirrorState != VIR_DOMAIN_DISK_MIRROR_STATE_READY)
            notReady++;
    }

    if (notReady) {
        VIR_DEBUG("Waiting for %zu disk mirrors to get ready", notReady);
        return 0;
    } else {
        VIR_DEBUG("All disk mirrors are ready");
        return 1;
    }
}


/*
 * If @check is true, the function will report an error and return a different
 * code in case a block job fails. This way we can properly abort migration in
 * case some block jobs failed once all memory has already been transferred.
 *
 * Returns 1 if all mirrors are gone,
 *         0 if some mirrors are still active,
 *         -1 some mirrors failed but some are still active,
 *         -2 all mirrors are gone but some of them failed.
 */
static int
qemuMigrationSrcNBDCopyCancelled(virDomainObjPtr vm,
                                 qemuDomainAsyncJob asyncJob,
                                 bool check)
{
    size_t i;
    size_t active = 0;
    size_t completed = 0;
    int status;
    bool failed = false;

 retry:
    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        qemuBlockJobDataPtr job;

        if (!diskPriv->migrating)
            continue;

        if (!(job = qemuBlockJobDiskGetJob(disk)))
            continue;

        status = qemuBlockJobUpdate(vm, job, asyncJob);
        switch (status) {
        case VIR_DOMAIN_BLOCK_JOB_FAILED:
            if (check) {
                qemuMigrationNBDReportMirrorError(job, disk->dst);
                failed = true;
            }
            ATTRIBUTE_FALLTHROUGH;
        case VIR_DOMAIN_BLOCK_JOB_CANCELED:
        case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
            diskPriv->migrating = false;
            break;

        default:
            active++;
        }

        if (status == VIR_DOMAIN_BLOCK_JOB_COMPLETED)
            completed++;

        virObjectUnref(job);
    }

    /* Updating completed block job drops the lock thus we have to recheck
     * block jobs for disks that reside before the disk(s) with completed
     * block job.
     */
    if (completed > 0) {
        completed = 0;
        active = 0;
        goto retry;
    }

    if (failed) {
        if (active) {
            VIR_DEBUG("Some disk mirrors failed; still waiting for %zu "
                      "disk mirrors to finish", active);
            return -1;
        } else {
            VIR_DEBUG("All disk mirrors are gone; some of them failed");
            return -2;
        }
    } else {
        if (active) {
            VIR_DEBUG("Waiting for %zu disk mirrors to finish", active);
            return 0;
        } else {
            VIR_DEBUG("All disk mirrors are gone");
            return 1;
        }
    }
}


/*
 * Returns 0 on success,
 *         1 when job is already completed or it failed and failNoJob is false,
 *         -1 on error or when job failed and failNoJob is true.
 */
static int
qemuMigrationSrcNBDCopyCancelOne(virQEMUDriverPtr driver,
                                 virDomainObjPtr vm,
                                 virDomainDiskDefPtr disk,
                                 qemuBlockJobDataPtr job,
                                 bool failNoJob,
                                 qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    char *diskAlias = NULL;
    int ret = -1;
    int status;
    int rv;

    status = qemuBlockJobUpdate(vm, job, asyncJob);
    switch (status) {
    case VIR_DOMAIN_BLOCK_JOB_FAILED:
    case VIR_DOMAIN_BLOCK_JOB_CANCELED:
        if (failNoJob) {
            qemuMigrationNBDReportMirrorError(job, disk->dst);
            goto cleanup;
        }
        ATTRIBUTE_FALLTHROUGH;
    case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
        ret = 1;
        goto cleanup;
    }

    if (!(diskAlias = qemuAliasDiskDriveFromDisk(disk)))
        return -1;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        goto cleanup;

    rv = qemuMonitorBlockJobCancel(priv->mon, diskAlias);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    VIR_FREE(diskAlias);
    return ret;
}


/**
 * qemuMigrationSrcNBDCopyCancel:
 * @driver: qemu driver
 * @vm: domain
 * @check: if true, report an error when some of the mirrors fail
 *
 * Cancel all drive-mirrors started by qemuMigrationSrcNBDStorageCopy.
 * Any pending block job events for the affected disks will be processed and
 * the synchronous block jobs will be terminated regardless of the return
 * value, unless qemu has crashed.
 *
 * Returns 0 on success, -1 otherwise.
 */
static int
qemuMigrationSrcNBDCopyCancel(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              bool check,
                              qemuDomainAsyncJob asyncJob,
                              virConnectPtr dconn)
{
    virErrorPtr err = NULL;
    int ret = -1;
    size_t i;
    int rv;
    bool failed = false;

    VIR_DEBUG("Cancelling drive mirrors for domain %s", vm->def->name);

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        qemuBlockJobDataPtr job;

        if (!(job = qemuBlockJobDiskGetJob(disk)) ||
            !qemuBlockJobIsRunning(job))
            diskPriv->migrating = false;

        if (!diskPriv->migrating) {
            virObjectUnref(job);
            continue;
        }

        rv = qemuMigrationSrcNBDCopyCancelOne(driver, vm, disk, job,
                                              check, asyncJob);
        if (rv != 0) {
            if (rv < 0) {
                if (!err)
                    err = virSaveLastError();
                failed = true;
            }
            qemuBlockJobSyncEnd(vm, job, asyncJob);
            diskPriv->migrating = false;
        }

        virObjectUnref(job);
    }

    while ((rv = qemuMigrationSrcNBDCopyCancelled(vm, asyncJob, check)) != 1) {
        if (check && !failed &&
            dconn && virConnectIsAlive(dconn) <= 0) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("Lost connection to destination host"));
            failed = true;
        }

        if (rv < 0) {
            failed = true;
            if (rv == -2)
                break;
        }

        if (failed && !err)
            err = virSaveLastError();

        if (virDomainObjWait(vm) < 0)
            goto cleanup;
    }

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);

        if (!diskPriv->migrSource)
            continue;

        qemuBlockStorageSourceDetachOneBlockdev(driver, vm, asyncJob,
                                                diskPriv->migrSource);
        virObjectUnref(diskPriv->migrSource);
        diskPriv->migrSource = NULL;
    }

    ret = failed ? -1 : 0;

 cleanup:
    if (err) {
        virSetError(err);
        virFreeError(err);
    }
    return ret;
}


static int
qemuMigrationSrcNBDStorageCopyBlockdev(virQEMUDriverPtr driver,
                                       virDomainObjPtr vm,
                                       virDomainDiskDefPtr disk,
                                       const char *diskAlias,
                                       const char *host,
                                       int port,
                                       unsigned long long mirror_speed,
                                       unsigned int mirror_flags,
                                       const char *tlsAlias)
{
    qemuBlockStorageSourceAttachDataPtr data = NULL;
    qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
    int mon_ret = 0;
    int ret = -1;
    VIR_AUTOUNREF(virStorageSourcePtr) copysrc = NULL;

    VIR_DEBUG("starting blockdev mirror for disk=%s to host=%s", diskAlias, host);

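    /* Build a virStorageSource describing the destination NBD export, i.e.
     * an NBD connection to @host:@port with @diskAlias as the export name
     * and @tlsAlias (if any) as the TLS credentials to use. */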
    if (!(copysrc = virStorageSourceNew()))
        goto cleanup;

    copysrc->type = VIR_STORAGE_TYPE_NETWORK;
    copysrc->protocol = VIR_STORAGE_NET_PROTOCOL_NBD;
    copysrc->format = VIR_STORAGE_FILE_RAW;

    if (!(copysrc->backingStore = virStorageSourceNew()))
        goto cleanup;

    if (VIR_STRDUP(copysrc->path, diskAlias) < 0)
        goto cleanup;

    if (VIR_ALLOC_N(copysrc->hosts, 1) < 0)
        goto cleanup;

    copysrc->nhosts = 1;
    copysrc->hosts->transport = VIR_STORAGE_NET_HOST_TRANS_TCP;
    copysrc->hosts->port = port;
    if (VIR_STRDUP(copysrc->hosts->name, host) < 0)
        goto cleanup;

    if (VIR_STRDUP(copysrc->tlsAlias, tlsAlias) < 0)
        goto cleanup;

    if (virAsprintf(&copysrc->nodestorage, "migration-%s-storage", disk->dst) < 0 ||
        virAsprintf(&copysrc->nodeformat, "migration-%s-format", disk->dst) < 0)
        goto cleanup;

    if (!(data = qemuBlockStorageSourceAttachPrepareBlockdev(copysrc)))
        goto cleanup;

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
        goto cleanup;

    mon_ret = qemuBlockStorageSourceAttachApply(qemuDomainGetMonitor(vm), data);

    if (mon_ret == 0)
        mon_ret = qemuMonitorBlockdevMirror(qemuDomainGetMonitor(vm), NULL,
                                            diskAlias, copysrc->nodeformat,
                                            mirror_speed, 0, 0, mirror_flags);

    if (mon_ret != 0)
        qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), data);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || mon_ret < 0)
        goto cleanup;

    VIR_STEAL_PTR(diskPriv->migrSource, copysrc);

    ret = 0;

 cleanup:
    qemuBlockStorageSourceAttachDataFree(data);
    return ret;
}


static int
qemuMigrationSrcNBDStorageCopyDriveMirror(virQEMUDriverPtr driver,
                                          virDomainObjPtr vm,
                                          const char *diskAlias,
                                          const char *host,
                                          int port,
                                          unsigned long long mirror_speed,
                                          unsigned int mirror_flags)
{
    char *nbd_dest = NULL;
    int mon_ret;
    int ret = -1;

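    /* Build the NBD URI for drive-mirror; brackets are needed for IPv6
     * addresses, e.g. (illustrative values only):
     *   nbd:192.168.0.1:49153:exportname=drive-virtio-disk0
     *   nbd:[::1]:49153:exportname=drive-virtio-disk0
     */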
    if (strchr(host, ':')) {
        if (virAsprintf(&nbd_dest, "nbd:[%s]:%d:exportname=%s",
                        host, port, diskAlias) < 0)
            goto cleanup;
    } else {
        if (virAsprintf(&nbd_dest, "nbd:%s:%d:exportname=%s",
                        host, port, diskAlias) < 0)
            goto cleanup;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
        goto cleanup;

    mon_ret = qemuMonitorDriveMirror(qemuDomainGetMonitor(vm),
                                     diskAlias, nbd_dest, "raw",
                                     mirror_speed, 0, 0, mirror_flags);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || mon_ret < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    VIR_FREE(nbd_dest);
    return ret;
}


static int
qemuMigrationSrcNBDStorageCopyOne(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  virDomainDiskDefPtr disk,
                                  const char *host,
                                  int port,
                                  unsigned long long mirror_speed,
                                  unsigned int mirror_flags,
                                  const char *tlsAlias,
                                  unsigned int flags)
{
    qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
    qemuBlockJobDataPtr job = NULL;
    char *diskAlias = NULL;
    int rc;
    int ret = -1;

    if (!(diskAlias = qemuAliasDiskDriveFromDisk(disk)))
        goto cleanup;

    if (!(job = qemuBlockJobDiskNew(disk, QEMU_BLOCKJOB_TYPE_COPY, diskAlias)))
        goto cleanup;

    qemuBlockJobSyncBegin(job);

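    /* A TLS-secured copy requires the blockdev-mirror based code path, where
     * the NBD client configuration (including the TLS alias) is set up
     * explicitly; otherwise the older drive-mirror command is used. */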
    if (flags & VIR_MIGRATE_TLS) {
        rc = qemuMigrationSrcNBDStorageCopyBlockdev(driver, vm,
                                                    disk, diskAlias,
                                                    host, port,
                                                    mirror_speed,
                                                    mirror_flags,
                                                    tlsAlias);
    } else {
        rc = qemuMigrationSrcNBDStorageCopyDriveMirror(driver, vm, diskAlias,
                                                       host, port,
                                                       mirror_speed,
                                                       mirror_flags);
    }

    if (rc < 0)
        goto cleanup;

    diskPriv->migrating = true;
    qemuBlockJobStarted(job);

    ret = 0;

 cleanup:
    qemuBlockJobStartupFinalize(job);
    VIR_FREE(diskAlias);
    return ret;
}


/**
 * qemuMigrationSrcNBDStorageCopy:
 * @driver: qemu driver
 * @vm: domain
 * @mig: migration cookie
 * @host: where are we migrating to
 * @speed: bandwidth limit in MiB/s
 * @migrate_flags: migrate monitor command flags
 *
 * Migrate non-shared storage using the NBD protocol to the server running
 * inside the qemu process on dst and wait until the copy converges.
 * On success update @migrate_flags so we don't tell 'migrate' command
 * to do the very same operation. On failure, the caller is
 * expected to call qemuMigrationSrcNBDCopyCancel to stop all
 * running copy operations.
 *
 * Returns 0 on success (@migrate_flags updated),
 *        -1 otherwise.
 */
static int
qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               qemuMigrationCookiePtr mig,
                               const char *host,
                               unsigned long speed,
                               unsigned int *migrate_flags,
                               size_t nmigrate_disks,
                               const char **migrate_disks,
                               virConnectPtr dconn,
                               const char *tlsAlias,
                               unsigned int flags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    int port;
    size_t i;
    unsigned long long mirror_speed = speed;
    unsigned int mirror_flags = VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT;
    int rv;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

    VIR_DEBUG("Starting drive mirrors for domain %s", vm->def->name);

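    /* @speed is in MiB/s while QEMU expects the block job bandwidth limit in
     * bytes/s; make sure the conversion below cannot overflow. */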
    if (mirror_speed > LLONG_MAX >> 20) {
        virReportError(VIR_ERR_OVERFLOW,
                       _("bandwidth must be less than %llu"),
                       LLONG_MAX >> 20);
        goto cleanup;
    }
    mirror_speed <<= 20;

    /* steal NBD port and thus prevent its propagation back to destination */
    port = mig->nbd->port;
    mig->nbd->port = 0;

    if (*migrate_flags & QEMU_MONITOR_MIGRATE_NON_SHARED_INC)
        mirror_flags |= VIR_DOMAIN_BLOCK_REBASE_SHALLOW;

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];

        /* check whether disk should be migrated */
        if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
            continue;

        if (qemuMigrationSrcNBDStorageCopyOne(driver, vm, disk, host, port,
                                              mirror_speed, mirror_flags,
                                              tlsAlias, flags) < 0)
            goto cleanup;

        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0) {
            VIR_WARN("Failed to save status on vm %s", vm->def->name);
            goto cleanup;
        }
    }

    while ((rv = qemuMigrationSrcNBDStorageCopyReady(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) != 1) {
        if (rv < 0)
            goto cleanup;

        if (priv->job.abortJob) {
            priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
            virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                           qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                           _("canceled by client"));
            goto cleanup;
        }

        if (dconn && virConnectIsAlive(dconn) <= 0) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("Lost connection to destination host"));
            goto cleanup;
        }

        if (virDomainObjWait(vm) < 0)
            goto cleanup;
    }

    qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                     priv->job.current);

    /* Okay, all disks are ready. Modify migrate_flags */
    *migrate_flags &= ~(QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
                        QEMU_MONITOR_MIGRATE_NON_SHARED_INC);
    ret = 0;

 cleanup:
    virObjectUnref(cfg);
    return ret;
}


/**
 * qemuMigrationSrcIsAllowedHostdev:
 * @def: domain definition
 *
 * Checks that @def does not contain any host devices unsupported across
 * migrations. Returns true if the vm is allowed to migrate.
 */
static bool
qemuMigrationSrcIsAllowedHostdev(const virDomainDef *def)
{
    size_t i;

    /* Migration with USB host devices is allowed, all other devices are
     * forbidden. */
    for (i = 0; i < def->nhostdevs; i++) {
        virDomainHostdevDefPtr hostdev = def->hostdevs[i];
        if (hostdev->mode != VIR_DOMAIN_HOSTDEV_MODE_SUBSYS ||
            hostdev->source.subsys.type != VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("domain has assigned non-USB host devices"));
            return false;
        }
    }

    return true;
}


/**
 * qemuMigrationSrcIsAllowed:
 * @driver: qemu driver struct
 * @vm: domain object
 * @remote: migration is remote
 * @flags: migration flags (see struct virDomainMigrateFlags)
 *
 * Validates that the configuration of @vm can be migrated in various
 * situations. If @remote is true, the migration happens to remote host. @flags
 * is used to check various special migration types according to the request.
 *
 * Returns true if migration is supported. Reports libvirt error and returns
 * false otherwise.
 */
bool
qemuMigrationSrcIsAllowed(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          bool remote,
                          unsigned int flags)
{
    int nsnapshots;
    int pauseReason;
    size_t i;

    /* perform these checks only when migrating to remote hosts */
    if (remote) {
        nsnapshots = virDomainSnapshotObjListNum(vm->snapshots, NULL, 0);
        if (nsnapshots < 0)
            return false;

        if (nsnapshots > 0) {
            virReportError(VIR_ERR_OPERATION_INVALID,
                           _("cannot migrate domain with %d snapshots"),
                           nsnapshots);
            return false;
        }

        /* cancel migration if disk I/O error is emitted while migrating */
        if (flags & VIR_MIGRATE_ABORT_ON_ERROR &&
            !(flags & VIR_MIGRATE_OFFLINE) &&
            virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
            pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("cannot migrate domain with I/O error"));
            return false;
        }

    }

    /* following checks don't make sense for offline migration */
    if (!(flags & VIR_MIGRATE_OFFLINE)) {
        if (qemuProcessAutoDestroyActive(driver, vm)) {
            virReportError(VIR_ERR_OPERATION_INVALID,
                           "%s", _("domain is marked for auto destroy"));
            return false;
        }


        if (qemuDomainHasBlockjob(vm, false)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("domain has active block job"));
            return false;
        }

        if (!qemuMigrationSrcIsAllowedHostdev(vm->def))
            return false;

        if (vm->def->cpu) {
            /* QEMU blocks migration and save with invariant TSC enabled
             * unless TSC frequency is explicitly set.
             */
            if (virCPUCheckFeature(vm->def->os.arch, vm->def->cpu,
                                   "invtsc") == 1) {
                bool block = true;

                for (i = 0; i < vm->def->clock.ntimers; i++) {
                    virDomainTimerDefPtr timer = vm->def->clock.timers[i];

                    if (timer->name == VIR_DOMAIN_TIMER_NAME_TSC &&
                        timer->frequency > 0) {
                        block = false;
                        break;
                    }
                }

                if (block) {
                    virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                                   _("domain has 'invtsc' CPU feature but "
                                     "TSC frequency is not specified"));
                    return false;
                }
            }
        }

        /* Verify that memory device config can be transferred reliably */
        for (i = 0; i < vm->def->nmems; i++) {
            virDomainMemoryDefPtr mem = vm->def->mems[i];

            if (mem->model == VIR_DOMAIN_MEMORY_MODEL_DIMM &&
                mem->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_DIMM) {
                virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                               _("domain's dimm info lacks slot ID "
                                 "or base address"));

                return false;
            }
        }

        if (vm->def->nshmems) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("migration with shmem device is not supported"));
            return false;
        }
    }

    return true;
}

static bool
qemuMigrationSrcIsSafe(virDomainDefPtr def,
                       size_t nmigrate_disks,
                       const char **migrate_disks,
                       unsigned int flags)

{
    bool storagemigration = flags & (VIR_MIGRATE_NON_SHARED_DISK |
                                     VIR_MIGRATE_NON_SHARED_INC);
    size_t i;
    int rc;

    for (i = 0; i < def->ndisks; i++) {
        virDomainDiskDefPtr disk = def->disks[i];
        const char *src = virDomainDiskGetSource(disk);

        /* Disks without any source (i.e. floppies and CD-ROMs)
         * OR readonly are safe. */
        if (virStorageSourceIsEmpty(disk->src) ||
            disk->src->readonly)
            continue;

        /* Disks which are migrated by qemu are safe too. */
        if (storagemigration &&
            qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
            continue;

        /* However, disks on local FS (e.g. ext4) are not safe. */
        if (virStorageSourceGetActualType(disk->src) == VIR_STORAGE_TYPE_FILE) {
            if ((rc = virFileIsSharedFS(src)) < 0) {
                return false;
            } else if (rc == 0) {
                virReportError(VIR_ERR_MIGRATE_UNSAFE, "%s",
                               _("Migration without shared storage is unsafe"));
                return false;
            }
            if ((rc = virStorageFileIsClusterFS(src)) < 0)
                return false;
            else if (rc == 1)
                continue;
        } else if (virStorageSourceGetActualType(disk->src) == VIR_STORAGE_TYPE_NETWORK) {
            /* But network disks are safe again. */
            continue;
        }

        /* Our code elsewhere guarantees shared disks are either readonly (in
         * which case cache mode doesn't matter) or used with cache=none or
         * cache=directsync */
        if (disk->src->shared ||
            disk->cachemode == VIR_DOMAIN_DISK_CACHE_DISABLE ||
            disk->cachemode == VIR_DOMAIN_DISK_CACHE_DIRECTSYNC)
            continue;

        virReportError(VIR_ERR_MIGRATE_UNSAFE, "%s",
                       _("Migration may lead to data corruption if disks"
                         " use cache other than none or directsync"));
        return false;
    }

    return true;
}


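/*
 * qemuMigrationAnyPostcopyFailed:
 *
 * Handle a migration that failed while in post-copy mode: instead of being
 * resumed, the domain is left (or placed) in the paused state with the
 * VIR_DOMAIN_PAUSED_POSTCOPY_FAILED reason so that it can be recovered
 * manually.
 */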
void
qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver,
                               virDomainObjPtr vm)
{
    virDomainState state;
    int reason;

    state = virDomainObjGetState(vm, &reason);

    if (state != VIR_DOMAIN_PAUSED &&
        state != VIR_DOMAIN_RUNNING)
        return;

    if (state == VIR_DOMAIN_PAUSED &&
        reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED)
        return;

    VIR_WARN("Migration of domain %s failed during post-copy; "
             "leaving the domain paused", vm->def->name);

    if (state == VIR_DOMAIN_RUNNING) {
        if (qemuProcessStopCPUs(driver, vm,
                                VIR_DOMAIN_PAUSED_POSTCOPY_FAILED,
                                QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
            VIR_WARN("Unable to pause guest CPUs for %s", vm->def->name);
    } else {
        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
                             VIR_DOMAIN_PAUSED_POSTCOPY_FAILED);
    }
}


static int
qemuMigrationSrcWaitForSpice(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (!priv->job.spiceMigration)
        return 0;

    VIR_DEBUG("Waiting for SPICE to finish migration");
    while (!priv->job.spiceMigrated && !priv->job.abortJob) {
        if (virDomainObjWait(vm) < 0)
            return -1;
    }
    return 0;
}


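/* Map the migration status reported by the QEMU monitor onto the libvirt
 * domain job status. */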
static void
qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo)
{
    switch ((qemuMonitorMigrationStatus) jobInfo->stats.mig.status) {
    case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_POSTCOPY;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_NONE;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_PAUSED;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_SETUP:
    case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
    case QEMU_MONITOR_MIGRATION_STATUS_CANCELLING:
    case QEMU_MONITOR_MIGRATION_STATUS_LAST:
        break;
    }
}


int
qemuMigrationAnyFetchStats(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           qemuDomainAsyncJob asyncJob,
                           qemuDomainJobInfoPtr jobInfo,
                           char **error)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuMonitorMigrationStats stats;
    int rv;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    rv = qemuMonitorGetMigrationStats(priv->mon, &stats, error);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0)
        return -1;

    jobInfo->stats.mig = stats;

    return 0;
}


static const char *
qemuMigrationJobName(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    switch (priv->job.asyncJob) {
    case QEMU_ASYNC_JOB_MIGRATION_OUT:
        return _("migration out job");
    case QEMU_ASYNC_JOB_SAVE:
        return _("domain save job");
    case QEMU_ASYNC_JOB_DUMP:
        return _("domain core dump job");
    case QEMU_ASYNC_JOB_NONE:
        return _("undefined");
    case QEMU_ASYNC_JOB_MIGRATION_IN:
        return _("migration in job");
    case QEMU_ASYNC_JOB_SNAPSHOT:
        return _("snapshot job");
    case QEMU_ASYNC_JOB_START:
        return _("start job");
    case QEMU_ASYNC_JOB_LAST:
    default:
        return _("job");
    }
}


static int
qemuMigrationJobCheckStatus(virQEMUDriverPtr driver,
                            virDomainObjPtr vm,
                            qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = priv->job.current;
    char *error = NULL;
    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
    int ret = -1;

    if (!events ||
        jobInfo->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
        if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, &error) < 0)
            return -1;
    }

    qemuMigrationUpdateJobType(jobInfo);

    switch (jobInfo->status) {
    case QEMU_DOMAIN_JOB_STATUS_NONE:
        virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
                       qemuMigrationJobName(vm), _("is not active"));
        goto cleanup;

    case QEMU_DOMAIN_JOB_STATUS_FAILED:
        virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
                       qemuMigrationJobName(vm),
                       error ? error : _("unexpectedly failed"));
        goto cleanup;

    case QEMU_DOMAIN_JOB_STATUS_CANCELED:
        virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                       qemuMigrationJobName(vm), _("canceled by client"));
        goto cleanup;

    case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
    case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
    case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
    case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
    case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
    case QEMU_DOMAIN_JOB_STATUS_PAUSED:
        break;
    }

    ret = 0;

 cleanup:
    VIR_FREE(error);
    return ret;
}


enum qemuMigrationCompletedFlags {
    QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR = (1 << 0),
    /* This flag should only be set when run on src host */
    QEMU_MIGRATION_COMPLETED_CHECK_STORAGE  = (1 << 1),
    QEMU_MIGRATION_COMPLETED_POSTCOPY       = (1 << 2),
    QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER = (1 << 3),
};


/**
 * Returns 1 if migration completed successfully,
 *         0 if the domain is still being migrated,
 *         -1 migration failed,
 *         -2 something else failed, we need to cancel migration.
 */
static int
qemuMigrationAnyCompleted(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          qemuDomainAsyncJob asyncJob,
                          virConnectPtr dconn,
                          unsigned int flags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = priv->job.current;
    int pauseReason;

    if (qemuMigrationJobCheckStatus(driver, vm, asyncJob) < 0)
        goto error;

    /* This flag should only be set when run on src host */
    if (flags & QEMU_MIGRATION_COMPLETED_CHECK_STORAGE &&
        qemuMigrationSrcNBDStorageCopyReady(vm, asyncJob) < 0)
        goto error;

    if (flags & QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR &&
        virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
        pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
        virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
                       qemuMigrationJobName(vm), _("failed due to I/O error"));
        goto error;
    }

    if (dconn && virConnectIsAlive(dconn) <= 0) {
        virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                       _("Lost connection to destination host"));
        goto error;
    }

    /* Migration was paused before serializing device state, let's return to
     * the caller so that it can finish all block jobs, resume migration, and
     * wait again for the real end of the migration.
     */
    if (flags & QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER &&
        jobInfo->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) {
        VIR_DEBUG("Migration paused before switchover");
        return 1;
    }

    /* In case of postcopy the source considers migration completed at the
     * moment it switched from active to postcopy-active state. The destination
     * will continue waiting until the migrate state changes to completed.
     */
    if (flags & QEMU_MIGRATION_COMPLETED_POSTCOPY &&
        jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) {
        VIR_DEBUG("Migration switched to post-copy");
        return 1;
    }

    if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED)
        return 1;
    else
        return 0;

 error:
    switch (jobInfo->status) {
    case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
    case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
    case QEMU_DOMAIN_JOB_STATUS_PAUSED:
        /* The migration was aborted by us rather than QEMU itself. */
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
        return -2;

    case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
        /* Something failed after QEMU already finished the migration. */
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
        return -1;

    case QEMU_DOMAIN_JOB_STATUS_FAILED:
    case QEMU_DOMAIN_JOB_STATUS_CANCELED:
        /* QEMU aborted the migration. */
        return -1;

    case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
    case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
    case QEMU_DOMAIN_JOB_STATUS_NONE:
        /* Impossible. */
        break;
    }

    return -1;
}


/* Returns 0 on success, -2 when migration needs to be cancelled, or -1 when
 * QEMU reports failed migration.
 */
static int
qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  qemuDomainAsyncJob asyncJob,
                                  virConnectPtr dconn,
                                  unsigned int flags)
1580
{
1581
    qemuDomainObjPrivatePtr priv = vm->privateData;
J
Jiri Denemark 已提交
1582
    qemuDomainJobInfoPtr jobInfo = priv->job.current;
1583
    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
1584
    int rv;
1585

1586 1587
    jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING;

1588 1589
    while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
                                           dconn, flags)) != 1) {
1590 1591
        if (rv < 0)
            return rv;
1592

1593 1594
        if (events) {
            if (virDomainObjWait(vm) < 0) {
1595 1596
                if (virDomainObjIsActive(vm))
                    jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
1597 1598 1599 1600 1601 1602 1603 1604 1605 1606
                return -2;
            }
        } else {
            /* Poll every 50ms for progress & to allow cancellation */
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };

            virObjectUnlock(vm);
            nanosleep(&ts, NULL);
            virObjectLock(vm);
        }
1607 1608
    }

1609
    if (events)
1610
        ignore_value(qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, NULL));
1611

1612
    qemuDomainJobInfoUpdateTime(jobInfo);
1613 1614
    qemuDomainJobInfoUpdateDowntime(jobInfo);
    VIR_FREE(priv->job.completed);
1615
    if (VIR_ALLOC(priv->job.completed) == 0) {
1616
        *priv->job.completed = *jobInfo;
1617 1618
        priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
    }
1619

1620 1621 1622 1623
    if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT &&
        jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED)
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;

1624
    return 0;
1625 1626 1627
}


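/* Destination-side counterpart of qemuMigrationSrcWaitForCompletion: waits
 * until the incoming migration finishes (or switches to post-copy when
 * @postcopy is true). A no-op for QEMU binaries without migration events. */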
static int
qemuMigrationDstWaitForCompletion(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  qemuDomainAsyncJob asyncJob,
                                  bool postcopy)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned int flags = 0;
    int rv;

    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT))
        return 0;

    VIR_DEBUG("Waiting for incoming migration to complete");

    if (postcopy)
        flags = QEMU_MIGRATION_COMPLETED_POSTCOPY;

    while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
                                           NULL, flags)) != 1) {
        if (rv < 0 || virDomainObjWait(vm) < 0)
            return -1;
    }

    return 0;
}


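/* Point the guest's SPICE server at the destination host so the client can
 * be seamlessly relocated; connection details are taken from the migration
 * cookie, optionally overridden by @graphicsuri. */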
static int
qemuMigrationSrcGraphicsRelocate(virQEMUDriverPtr driver,
                                 virDomainObjPtr vm,
                                 qemuMigrationCookiePtr cookie,
                                 const char *graphicsuri)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    const char *listenAddress = NULL;
    virSocketAddr addr;
    virURIPtr uri = NULL;
    int type = -1;
    int port = -1;
    int tlsPort = -1;
    const char *tlsSubject = NULL;

    if (!cookie || (!cookie->graphics && !graphicsuri))
        return 0;

    if (graphicsuri && !(uri = virURIParse(graphicsuri)))
        goto cleanup;

    if (cookie->graphics) {
        type = cookie->graphics->type;

        listenAddress = cookie->graphics->listen;

        if (!listenAddress ||
            (virSocketAddrParse(&addr, listenAddress, AF_UNSPEC) > 0 &&
             virSocketAddrIsWildcard(&addr)))
            listenAddress = cookie->remoteHostname;

        port = cookie->graphics->port;
        tlsPort = cookie->graphics->tlsPort;
        tlsSubject = cookie->graphics->tlsSubject;
    }

    if (uri) {
        size_t i;

        if ((type = virDomainGraphicsTypeFromString(uri->scheme)) < 0) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("unknown graphics type %s"), uri->scheme);
            goto cleanup;
        }

        if (uri->server)
            listenAddress = uri->server;
        if (uri->port > 0)
            port = uri->port;

        for (i = 0; i < uri->paramsCount; i++) {
            virURIParamPtr param = uri->params + i;

            if (STRCASEEQ(param->name, "tlsPort")) {
                if (virStrToLong_i(param->value, NULL, 10, &tlsPort) < 0) {
                    virReportError(VIR_ERR_INVALID_ARG,
                                   _("invalid tlsPort number: %s"),
                                   param->value);
                    goto cleanup;
                }
            } else if (STRCASEEQ(param->name, "tlsSubject")) {
                tlsSubject = param->value;
            }
        }
    }

    /* QEMU doesn't support VNC relocation yet, so
     * skip it to avoid generating an error
     */
    if (type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
        ret = 0;
        goto cleanup;
    }

    /* Older libvirt sends port == 0 for listen type='none' graphics. It's
     * safe to ignore such requests since relocation to unknown port does
     * not make sense in general.
     */
    if (port <= 0 && tlsPort <= 0) {
        ret = 0;
        goto cleanup;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
        ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress,
                                          port, tlsPort, tlsSubject);
        priv->job.spiceMigration = !ret;
        if (qemuDomainObjExitMonitor(driver, vm) < 0)
            ret = -1;
    }

 cleanup:
    virURIFree(uri);
    return ret;
}


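/* Apply the Open vSwitch port data carried in the migration cookie to the
 * corresponding interfaces of the domain on the destination host. */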
static int
qemuMigrationDstOPDRelocate(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
                            virDomainObjPtr vm,
                            qemuMigrationCookiePtr cookie)
{
    virDomainNetDefPtr netptr;
    int ret = -1;
    size_t i;

    for (i = 0; i < cookie->network->nnets; i++) {
        netptr = vm->def->nets[i];

        switch (cookie->network->net[i].vporttype) {
        case VIR_NETDEV_VPORT_PROFILE_NONE:
        case VIR_NETDEV_VPORT_PROFILE_8021QBG:
        case VIR_NETDEV_VPORT_PROFILE_8021QBH:
           break;
        case VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH:
            if (virNetDevOpenvswitchSetMigrateData(cookie->network->net[i].portdata,
                                                   netptr->ifname) != 0) {
                virReportError(VIR_ERR_INTERNAL_ERROR,
                               _("Unable to run command to set OVS port data for "
                                 "interface %s"), netptr->ifname);
                goto cleanup;
            }
            break;
        default:
            break;
        }
    }

    ret = 0;
 cleanup:
    return ret;
}


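/* Reject incoming migration URIs using a transport this QEMU binary cannot
 * handle (e.g. RDMA without the corresponding capability). */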
int
qemuMigrationDstCheckProtocol(virQEMUCapsPtr qemuCaps,
                              const char *migrateFrom)
{
    if (STRPREFIX(migrateFrom, "rdma")) {
        if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_MIGRATE_RDMA)) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("incoming RDMA migration is not supported "
                             "with this QEMU binary"));
            return -1;
        }
    } else if (!STRPREFIX(migrateFrom, "tcp") &&
               !STRPREFIX(migrateFrom, "exec") &&
               !STRPREFIX(migrateFrom, "fd") &&
               !STRPREFIX(migrateFrom, "unix") &&
               STRNEQ(migrateFrom, "stdio")) {
        virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                       _("unknown migration protocol"));
        return -1;
    }

    return 0;
}


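/* Translate the internal migrateFrom specification into the URI handed to
 * QEMU's migrate-incoming command ("stdio" becomes "fd:<n>"). */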
char *
qemuMigrationDstGetURI(const char *migrateFrom,
                       int migrateFd)
{
    char *uri = NULL;

    if (STREQ(migrateFrom, "stdio"))
        ignore_value(virAsprintf(&uri, "fd:%d", migrateFd));
    else
        ignore_value(VIR_STRDUP(uri, migrateFrom));

    return uri;
}


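/* Tell QEMU to start listening for the incoming migration on @uri; except
 * for the main incoming migration job, this also waits for completion. */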
int
qemuMigrationDstRun(virQEMUDriverPtr driver,
                    virDomainObjPtr vm,
                    const char *uri,
                    qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    int rv;

    VIR_DEBUG("Setting up incoming migration with URI %s", uri);

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    rv = qemuMonitorMigrateIncoming(priv->mon, uri);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0)
        goto cleanup;

    if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
        /* qemuMigrationDstWaitForCompletion is called from the Finish phase */
        ret = 0;
        goto cleanup;
    }

    if (qemuMigrationDstWaitForCompletion(driver, vm, asyncJob, false) < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    return ret;
}


/* This is called for outgoing non-p2p migrations when a connection to the
 * client which initiated the migration was closed but we were waiting for it
 * to follow up with the next phase, that is, in between
 * qemuDomainMigrateBegin3 and qemuDomainMigratePerform3 or
 * qemuDomainMigratePerform3 and qemuDomainMigrateConfirm3.
 */
static void
qemuMigrationSrcCleanup(virDomainObjPtr vm,
                        virConnectPtr conn,
                        void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s",
              vm->def->name, conn,
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
              qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
                                              priv->job.phase));

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
        return;

    VIR_DEBUG("The connection which started outgoing migration of domain %s"
              " was closed; canceling the migration",
              vm->def->name);

    switch ((qemuMigrationJobPhase) priv->job.phase) {
    case QEMU_MIGRATION_PHASE_BEGIN3:
        /* just forget we were about to migrate */
        qemuDomainObjDiscardAsyncJob(driver, vm);
        break;

    case QEMU_MIGRATION_PHASE_PERFORM3_DONE:
        VIR_WARN("Migration of domain %s finished but we don't know if the"
                 " domain was successfully started on destination or not",
                 vm->def->name);
        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 priv->job.migParams, priv->job.apiFlags);
        /* clear the job and let higher levels decide what to do */
        qemuDomainObjDiscardAsyncJob(driver, vm);
        break;

    case QEMU_MIGRATION_PHASE_PERFORM3:
        /* cannot be seen without an active migration API; unreachable */
    case QEMU_MIGRATION_PHASE_CONFIRM3:
    case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
        /* all done; unreachable */
    case QEMU_MIGRATION_PHASE_PREPARE:
    case QEMU_MIGRATION_PHASE_FINISH2:
    case QEMU_MIGRATION_PHASE_FINISH3:
        /* incoming migration; unreachable */
    case QEMU_MIGRATION_PHASE_PERFORM2:
        /* single phase outgoing migration; unreachable */
    case QEMU_MIGRATION_PHASE_NONE:
    case QEMU_MIGRATION_PHASE_LAST:
        /* unreachable */
        ;
    }
}


/* The caller is supposed to lock the vm and start a migration job. */
static char *
qemuMigrationSrcBeginPhase(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           const char *xmlin,
                           const char *dname,
                           char **cookieout,
                           int *cookieoutlen,
                           size_t nmigrate_disks,
                           const char **migrate_disks,
                           unsigned long flags)
{
    char *rv = NULL;
    qemuMigrationCookiePtr mig = NULL;
    virDomainDefPtr def = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virCapsPtr caps = NULL;
    unsigned int cookieFlags = QEMU_MIGRATION_COOKIE_LOCKSTATE;

    VIR_DEBUG("driver=%p, vm=%p, xmlin=%s, dname=%s,"
              " cookieout=%p, cookieoutlen=%p,"
              " nmigrate_disks=%zu, migrate_disks=%p, flags=0x%lx",
              driver, vm, NULLSTR(xmlin), NULLSTR(dname),
              cookieout, cookieoutlen, nmigrate_disks,
              migrate_disks, flags);

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    /* Only set the phase if we are inside QEMU_ASYNC_JOB_MIGRATION_OUT.
     * Otherwise we will start the async job later in the perform phase losing
     * change protection.
     */
    if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
        qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_BEGIN3);

    if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
        goto cleanup;

    if (!(flags & (VIR_MIGRATE_UNSAFE | VIR_MIGRATE_OFFLINE)) &&
        !qemuMigrationSrcIsSafe(vm->def, nmigrate_disks, migrate_disks, flags))
        goto cleanup;

    if (flags & VIR_MIGRATE_POSTCOPY &&
        (!(flags & VIR_MIGRATE_LIVE) ||
         flags & VIR_MIGRATE_PAUSED)) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy migration is not supported with non-live "
                         "or paused migration"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_POSTCOPY && flags & VIR_MIGRATE_TUNNELLED) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy is not supported with tunnelled migration"));
        goto cleanup;
    }

    if (flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC)) {
        if (nmigrate_disks) {
            size_t i, j;
            /* Check user requested only known disk targets. */
            for (i = 0; i < nmigrate_disks; i++) {
                for (j = 0; j < vm->def->ndisks; j++) {
                    if (STREQ(vm->def->disks[j]->dst, migrate_disks[i]))
                        break;
                }

                if (j == vm->def->ndisks) {
                    virReportError(VIR_ERR_INVALID_ARG,
                                   _("disk target %s not found"),
                                   migrate_disks[i]);
                    goto cleanup;
                }
            }

            if (flags & VIR_MIGRATE_TUNNELLED) {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("Selecting disks to migrate is not "
                                 "implemented for tunnelled migration"));
                goto cleanup;
            }
        }

        /* TODO support NBD for TUNNELLED migration */
        if (flags & VIR_MIGRATE_TUNNELLED) {
            VIR_WARN("NBD in tunnelled migration is currently not supported");
        } else {
            cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
            priv->nbdPort = 0;
        }
    }

    if (virDomainDefHasMemoryHotplug(vm->def) ||
        ((flags & VIR_MIGRATE_PERSIST_DEST) &&
         vm->newDef && virDomainDefHasMemoryHotplug(vm->newDef)))
        cookieFlags |= QEMU_MIGRATION_COOKIE_MEMORY_HOTPLUG;

    if (!qemuDomainVcpuHotplugIsInOrder(vm->def) ||
        ((flags & VIR_MIGRATE_PERSIST_DEST) &&
         vm->newDef && !qemuDomainVcpuHotplugIsInOrder(vm->newDef)))
        cookieFlags |= QEMU_MIGRATION_COOKIE_CPU_HOTPLUG;

    if (priv->origCPU)
        cookieFlags |= QEMU_MIGRATION_COOKIE_CPU;

    cookieFlags |= QEMU_MIGRATION_COOKIE_ALLOW_REBOOT;

    if (!(flags & VIR_MIGRATE_OFFLINE))
        cookieFlags |= QEMU_MIGRATION_COOKIE_CAPS;

    if (!(mig = qemuMigrationEatCookie(driver, vm->def,
                                       priv->origname, priv, NULL, 0, 0)))
        goto cleanup;

    if (qemuMigrationBakeCookie(mig, driver, vm,
                                QEMU_MIGRATION_SOURCE,
                                cookieout, cookieoutlen,
                                cookieFlags) < 0)
        goto cleanup;

    if (flags & VIR_MIGRATE_OFFLINE) {
        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
                     VIR_MIGRATE_NON_SHARED_INC)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration cannot handle "
                             "non-shared storage"));
            goto cleanup;
        }
        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration must be specified with "
                             "the persistent flag set"));
            goto cleanup;
        }
        if (flags & VIR_MIGRATE_TUNNELLED) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("tunnelled offline migration does not "
                             "make sense"));
            goto cleanup;
        }
    }

    if (xmlin) {
        if (!(def = virDomainDefParseString(xmlin, caps, driver->xmlopt, priv->qemuCaps,
                                            VIR_DOMAIN_DEF_PARSE_INACTIVE |
                                            VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE)))
            goto cleanup;

        if (!qemuDomainCheckABIStability(driver, vm, def))
            goto cleanup;

        rv = qemuDomainDefFormatLive(driver, def, NULL, false, true);
    } else {
        rv = qemuDomainDefFormatLive(driver, vm->def, priv->origCPU,
                                     false, true);
    }

 cleanup:
    qemuMigrationCookieFree(mig);
    virObjectUnref(caps);
    virDomainDefFree(def);
    return rv;
}

char *
qemuMigrationSrcBegin(virConnectPtr conn,
                      virDomainObjPtr vm,
                      const char *xmlin,
                      const char *dname,
                      char **cookieout,
                      int *cookieoutlen,
                      size_t nmigrate_disks,
                      const char **migrate_disks,
                      unsigned long flags)
{
    virQEMUDriverPtr driver = conn->privateData;
    char *xml = NULL;
    qemuDomainAsyncJob asyncJob;

    if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                  flags) < 0)
            goto cleanup;
        asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT;
    } else {
        if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
            goto cleanup;
        asyncJob = QEMU_ASYNC_JOB_NONE;
    }

    qemuMigrationSrcStoreDomainState(vm);

    if (!(flags & VIR_MIGRATE_OFFLINE) && virDomainObjCheckActive(vm) < 0)
        goto endjob;

    /* Check if there is any ejected media.
     * We don't want to require them on the destination.
     */
    if (!(flags & VIR_MIGRATE_OFFLINE) &&
        qemuProcessRefreshDisks(driver, vm, asyncJob) < 0)
        goto endjob;

    if (!(xml = qemuMigrationSrcBeginPhase(driver, vm, xmlin, dname,
                                           cookieout, cookieoutlen,
                                           nmigrate_disks, migrate_disks, flags)))
        goto endjob;

    if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        /* We keep the job active across API calls until the confirm() call.
         * This prevents any other APIs being invoked while migration is taking
         * place.
         */
        if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
                                 qemuMigrationSrcCleanup) < 0) {
            VIR_FREE(xml);
            goto endjob;
        }
        qemuMigrationJobContinue(vm);
    } else {
        goto endjob;
    }

 cleanup:
    virDomainObjEndAPI(&vm);
    return xml;

 endjob:
    if (flags & VIR_MIGRATE_CHANGE_PROTECTION)
        qemuMigrationJobFinish(driver, vm);
    else
        qemuDomainObjEndJob(driver, vm);
    goto cleanup;
}


/* Prepare is the first step, and it runs on the destination host.
 */

static void
qemuMigrationDstPrepareCleanup(virQEMUDriverPtr driver,
                               virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    VIR_DEBUG("driver=%p, vm=%s, job=%s, asyncJob=%s",
              driver,
              vm->def->name,
              qemuDomainJobTypeToString(priv->job.active),
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob));

    virPortAllocatorRelease(priv->migrationPort);
    priv->migrationPort = 0;

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN))
        return;
    qemuDomainObjDiscardAsyncJob(driver, vm);
}

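/* Build the incoming process definition describing how QEMU should listen
 * for the migration stream: "stdio" for tunnelled migration, otherwise
 * protocol:host:port with IPv6 addresses enclosed in brackets. */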
static qemuProcessIncomingDefPtr
qemuMigrationDstPrepare(virDomainObjPtr vm,
                        bool tunnel,
                        const char *protocol,
                        const char *listenAddress,
                        unsigned short port,
                        int fd)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuProcessIncomingDefPtr inc = NULL;
    char *migrateFrom = NULL;

    if (tunnel) {
        if (VIR_STRDUP(migrateFrom, "stdio") < 0)
            goto cleanup;
    } else {
        bool encloseAddress = false;
        bool hostIPv6Capable = false;
        struct addrinfo *info = NULL;
        struct addrinfo hints = { .ai_flags = AI_ADDRCONFIG,
                                  .ai_socktype = SOCK_STREAM };
        const char *incFormat;

        if (getaddrinfo("::", NULL, &hints, &info) == 0) {
            freeaddrinfo(info);
            hostIPv6Capable = true;
        }

        if (listenAddress) {
            if (virSocketAddrNumericFamily(listenAddress) == AF_INET6) {
                if (!hostIPv6Capable) {
                    virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                                   _("host isn't capable of IPv6"));
                    goto cleanup;
                }
                /* IPv6 address must be escaped in brackets on the cmd line */
                encloseAddress = true;
            } else {
                /* listenAddress is a hostname or IPv4 */
            }
        } else if (hostIPv6Capable) {
            /* Listen on :: instead of 0.0.0.0 if QEMU understands it
             * and there is at least one IPv6 address configured
             */
            listenAddress = "::";
            encloseAddress = true;
        } else {
            listenAddress = "0.0.0.0";
        }

        /* QEMU will be started with
         *   -incoming protocol:[<IPv6 addr>]:port,
         *   -incoming protocol:<IPv4 addr>:port, or
         *   -incoming protocol:<hostname>:port
         */
        if (encloseAddress)
            incFormat = "%s:[%s]:%d";
        else
            incFormat = "%s:%s:%d";
        if (virAsprintf(&migrateFrom, incFormat,
                        protocol, listenAddress, port) < 0)
            goto cleanup;
    }

    inc = qemuProcessIncomingDefNew(priv->qemuCaps, listenAddress,
                                    migrateFrom, fd, NULL);

 cleanup:
    VIR_FREE(migrateFrom);
    return inc;
}

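/* Common implementation of the Prepare phase shared by the tunnelled and
 * direct variants: parse the cookie, register the domain, start QEMU with
 * incoming migration set up and keep the migration job active for Finish. */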
static int
qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
                           virConnectPtr dconn,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           virDomainDefPtr *def,
                           const char *origname,
                           virStreamPtr st,
                           const char *protocol,
                           unsigned short port,
                           bool autoPort,
                           const char *listenAddress,
                           size_t nmigrate_disks,
                           const char **migrate_disks,
                           int nbdPort,
                           qemuMigrationParamsPtr migParams,
                           unsigned long flags)
{
    virDomainObjPtr vm = NULL;
    virObjectEventPtr event = NULL;
    virErrorPtr origErr;
    int ret = -1;
    int dataFD[2] = { -1, -1 };
    qemuDomainObjPrivatePtr priv = NULL;
    qemuMigrationCookiePtr mig = NULL;
    bool tunnel = !!st;
    char *xmlout = NULL;
    unsigned int cookieFlags;
    unsigned int startFlags;
    virCapsPtr caps = NULL;
    qemuProcessIncomingDefPtr incoming = NULL;
    bool taint_hook = false;
    bool stopProcess = false;
    bool relabel = false;
    int rv;
    char *tlsAlias = NULL;

    virNWFilterReadLockFilterUpdates();

    if (flags & VIR_MIGRATE_OFFLINE) {
        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
                     VIR_MIGRATE_NON_SHARED_INC)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration cannot handle "
                             "non-shared storage"));
            goto cleanup;
        }
        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration must be specified with "
                             "the persistent flag set"));
            goto cleanup;
        }
        if (tunnel) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("tunnelled offline migration does not "
                             "make sense"));
            goto cleanup;
        }
        cookieFlags = 0;
    } else {
        cookieFlags = QEMU_MIGRATION_COOKIE_GRAPHICS |
                      QEMU_MIGRATION_COOKIE_CAPS;
    }

    if (flags & VIR_MIGRATE_POSTCOPY &&
        (!(flags & VIR_MIGRATE_LIVE) ||
         flags & VIR_MIGRATE_PAUSED)) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy migration is not supported with non-live "
                         "or paused migration"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_POSTCOPY && flags & VIR_MIGRATE_TUNNELLED) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy is not supported with tunnelled migration"));
        goto cleanup;
    }

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    if (!qemuMigrationSrcIsAllowedHostdev(*def))
        goto cleanup;

    /* Let migration hook filter domain XML */
    if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
        char *xml;
        int hookret;

        if (!(xml = qemuDomainDefFormatXML(driver, *def,
                                           VIR_DOMAIN_XML_SECURE |
                                           VIR_DOMAIN_XML_MIGRATABLE)))
            goto cleanup;

        hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, (*def)->name,
                              VIR_HOOK_QEMU_OP_MIGRATE, VIR_HOOK_SUBOP_BEGIN,
                              NULL, xml, &xmlout);
        VIR_FREE(xml);

        if (hookret < 0) {
            goto cleanup;
        } else if (hookret == 0) {
            if (virStringIsEmpty(xmlout)) {
                VIR_DEBUG("Migrate hook filter returned nothing; using the"
                          " original XML");
            } else {
                virDomainDefPtr newdef;

                VIR_DEBUG("Using hook-filtered domain XML: %s", xmlout);
                newdef = virDomainDefParseString(xmlout, caps, driver->xmlopt, NULL,
                                                 VIR_DOMAIN_DEF_PARSE_INACTIVE |
                                                 VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE);
                if (!newdef)
                    goto cleanup;

                if (!qemuDomainDefCheckABIStability(driver, *def, newdef)) {
                    virDomainDefFree(newdef);
                    goto cleanup;
                }

                virDomainDefFree(*def);
                *def = newdef;
                /* We should taint the domain here. However, @vm and therefore
                 * privateData too are still NULL, so just notice the fact and
                 * taint it later. */
                taint_hook = true;
            }
        }
    }

    /* Parse cookie earlier than adding the domain onto the
     * domain list. Parsing/validation may fail and there's no
     * point in having the domain in the list at that point. */
    if (!(mig = qemuMigrationEatCookie(driver, *def, origname, NULL,
                                       cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_LOCKSTATE |
                                       QEMU_MIGRATION_COOKIE_NBD |
                                       QEMU_MIGRATION_COOKIE_MEMORY_HOTPLUG |
                                       QEMU_MIGRATION_COOKIE_CPU_HOTPLUG |
                                       QEMU_MIGRATION_COOKIE_CPU |
                                       QEMU_MIGRATION_COOKIE_ALLOW_REBOOT |
                                       QEMU_MIGRATION_COOKIE_CAPS)))
        goto cleanup;

    if (!(vm = virDomainObjListAdd(driver->domains, *def,
                                   driver->xmlopt,
                                   VIR_DOMAIN_OBJ_LIST_ADD_LIVE |
                                   VIR_DOMAIN_OBJ_LIST_ADD_CHECK_LIVE,
                                   NULL)))
        goto cleanup;
    *def = NULL;

    priv = vm->privateData;
    if (VIR_STRDUP(priv->origname, origname) < 0)
        goto cleanup;

    if (taint_hook) {
        /* Domain XML has been altered by a hook script. */
        priv->hookRun = true;
    }

    if (STREQ_NULLABLE(protocol, "rdma") &&
        !virMemoryLimitIsSet(vm->def->mem.hard_limit)) {
        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                       _("cannot start RDMA migration with no memory hard "
                         "limit set"));
        goto cleanup;
    }

    if (qemuMigrationDstPrecreateStorage(vm, mig->nbd,
                                         nmigrate_disks, migrate_disks,
                                         !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
        goto cleanup;

    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                              flags) < 0)
        goto cleanup;
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PREPARE);

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    if (flags & VIR_MIGRATE_OFFLINE)
        goto done;

    if (tunnel &&
        (pipe(dataFD) < 0 || virSetCloseExec(dataFD[1]) < 0)) {
        virReportSystemError(errno, "%s",
                             _("cannot create pipe for tunnelled migration"));
        goto stopjob;
    }

    startFlags = VIR_QEMU_PROCESS_START_AUTODESTROY;

    if (qemuProcessInit(driver, vm, mig->cpu, QEMU_ASYNC_JOB_MIGRATION_IN,
                        true, startFlags) < 0)
        goto stopjob;
    stopProcess = true;

    priv->allowReboot = mig->allowReboot;

    if (!(incoming = qemuMigrationDstPrepare(vm, tunnel, protocol,
                                             listenAddress, port,
                                             dataFD[0])))
        goto stopjob;

    if (qemuProcessPrepareDomain(driver, vm, startFlags) < 0)
        goto stopjob;

    if (qemuProcessPrepareHost(driver, vm, startFlags) < 0)
        goto stopjob;

    rv = qemuProcessLaunch(dconn, driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                           incoming, NULL,
                           VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
                           startFlags);
    if (rv < 0) {
        if (rv == -2)
            relabel = true;
        goto stopjob;
    }
    relabel = true;

    if (tunnel) {
        if (virFDStreamOpen(st, dataFD[1]) < 0) {
            virReportSystemError(errno, "%s",
                                 _("cannot pass pipe for tunnelled migration"));
            goto stopjob;
        }
        dataFD[1] = -1; /* 'st' owns the FD now & will close it */
    }

    if (STREQ_NULLABLE(protocol, "rdma") &&
        virProcessSetMaxMemLock(vm->pid, vm->def->mem.hard_limit << 10) < 0) {
        goto stopjob;
    }

    if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                                 migParams, mig->caps->automatic) < 0)
        goto stopjob;

    /* Migrations using TLS need to add the "tls-creds-x509" object and
     * set the migration TLS parameters */
    if (flags & VIR_MIGRATE_TLS) {
        if (qemuMigrationParamsEnableTLS(driver, vm, true,
                                         QEMU_ASYNC_JOB_MIGRATION_IN,
                                         &tlsAlias, NULL,
                                         migParams) < 0)
            goto stopjob;
    } else {
        if (qemuMigrationParamsDisableTLS(vm, migParams) < 0)
            goto stopjob;
    }

    if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                                 migParams) < 0)
        goto stopjob;

    if (mig->nbd &&
        flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC) &&
        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_SERVER)) {
        const char *nbdTLSAlias = NULL;

        if (flags & VIR_MIGRATE_TLS) {
            if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_TLS)) {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("QEMU NBD server does not support TLS transport"));
                goto stopjob;
            }

            nbdTLSAlias = tlsAlias;
        }

        if (qemuMigrationDstStartNBDServer(driver, vm, incoming->address,
                                           nmigrate_disks, migrate_disks,
                                           nbdPort, nbdTLSAlias) < 0) {
            goto stopjob;
        }
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (mig->lockState) {
        VIR_DEBUG("Received lockstate %s", mig->lockState);
        VIR_FREE(priv->lockState);
        priv->lockState = mig->lockState;
        mig->lockState = NULL;
    } else {
        VIR_DEBUG("Received no lockstate");
    }

    if (incoming->deferredURI &&
        qemuMigrationDstRun(driver, vm, incoming->deferredURI,
                            QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        goto stopjob;

    if (qemuProcessFinishStartup(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                                 false, VIR_DOMAIN_PAUSED_MIGRATION) < 0)
        goto stopjob;

 done:
    if (qemuMigrationBakeCookie(mig, driver, vm,
                                QEMU_MIGRATION_DESTINATION,
                                cookieout, cookieoutlen, cookieFlags) < 0) {
        /* We could tear down the whole guest here, but
         * cookie data is (so far) non-critical, so that
         * seems a little harsh. We'll just warn for now.
         */
        VIR_WARN("Unable to encode migration cookie");
    }

    if (qemuDomainCleanupAdd(vm, qemuMigrationDstPrepareCleanup) < 0)
        goto stopjob;

    if (!(flags & VIR_MIGRATE_OFFLINE)) {
        virDomainAuditStart(vm, "migrated", true);
        event = virDomainEventLifecycleNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STARTED,
                                         VIR_DOMAIN_EVENT_STARTED_MIGRATED);
    }

    /* We keep the job active across API calls until the finish() call.
     * This prevents any other APIs being invoked while incoming
     * migration is taking place.
     */
    qemuMigrationJobContinue(vm);

    if (autoPort)
        priv->migrationPort = port;
    /* in this case port is not auto selected and we don't need to manage it
     * anymore after cookie is baked
     */
    if (nbdPort != 0)
        priv->nbdPort = 0;
    ret = 0;

 cleanup:
    virErrorPreserveLast(&origErr);
    VIR_FREE(tlsAlias);
    qemuProcessIncomingDefFree(incoming);
    VIR_FREE(xmlout);
    VIR_FORCE_CLOSE(dataFD[0]);
    VIR_FORCE_CLOSE(dataFD[1]);
    if (ret < 0 && priv) {
        /* priv is set right after vm is added to the list of domains
         * and there is no 'goto cleanup;' in the middle of those */
        VIR_FREE(priv->origname);
        /* release if port is auto selected which is not the case if
         * it is given in parameters
         */
        if (nbdPort == 0)
            virPortAllocatorRelease(priv->nbdPort);
        priv->nbdPort = 0;
        virDomainObjRemoveTransientDef(vm);
        qemuDomainRemoveInactiveJob(driver, vm);
    }
    virDomainObjEndAPI(&vm);
    virObjectEventStateQueue(driver->domainEventState, event);
    qemuMigrationCookieFree(mig);
    virObjectUnref(caps);
    virNWFilterUnlockFilterUpdates();
    virErrorRestore(&origErr);
    return ret;

 stopjob:
    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                             priv->job.migParams, priv->job.apiFlags);

    if (stopProcess) {
        unsigned int stopFlags = VIR_QEMU_PROCESS_STOP_MIGRATED;
        if (!relabel)
            stopFlags |= VIR_QEMU_PROCESS_STOP_NO_RELABEL;
        virDomainAuditStart(vm, "migrated", false);
        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                        QEMU_ASYNC_JOB_MIGRATION_IN, stopFlags);
    }

    qemuMigrationJobFinish(driver, vm);
    goto cleanup;
}


/*
 * This version starts an empty VM listening on a localhost TCP port, and
 * sets up the corresponding virStream to handle the incoming data.
 */
int
qemuMigrationDstPrepareTunnel(virQEMUDriverPtr driver,
                              virConnectPtr dconn,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              virStreamPtr st,
                              virDomainDefPtr *def,
                              const char *origname,
                              qemuMigrationParamsPtr migParams,
                              unsigned long flags)
{
    VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, st=%p, def=%p, "
              "origname=%s, flags=0x%lx",
              driver, dconn, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, st, *def, origname, flags);

    if (st == NULL) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("tunnelled migration requested but NULL stream passed"));
        return -1;
    }

    return qemuMigrationDstPrepareAny(driver, dconn, cookiein, cookieinlen,
                                      cookieout, cookieoutlen, def, origname,
                                      st, NULL, 0, false, NULL, 0, NULL, 0,
                                      migParams, flags);
}


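/* Parse a migration URI, also accepting the legacy "tcp:host:port" form;
 * *wellFormed reports whether the original string was a proper URI. */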
static virURIPtr
qemuMigrationAnyParseURI(const char *uri, bool *wellFormed)
{
    char *tmp = NULL;
    virURIPtr parsed;

    /* For compatibility reasons tcp://... URIs are sent as tcp:...
     * We need to transform them to a well-formed URI before parsing. */
    if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri + 4, "//")) {
        if (virAsprintf(&tmp, "tcp://%s", uri + 4) < 0)
            return NULL;
        uri = tmp;
    }

    parsed = virURIParse(uri);
    if (parsed && wellFormed)
        *wellFormed = !tmp;
    VIR_FREE(tmp);

    return parsed;
}


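/* Prepare phase for direct (non-tunnelled) migration: choose or parse the
 * listen port, report the resulting URI back via @uri_out and delegate the
 * actual work to qemuMigrationDstPrepareAny. */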
int
qemuMigrationDstPrepareDirect(virQEMUDriverPtr driver,
                              virConnectPtr dconn,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              const char *uri_in,
                              char **uri_out,
                              virDomainDefPtr *def,
                              const char *origname,
                              const char *listenAddress,
                              size_t nmigrate_disks,
                              const char **migrate_disks,
                              int nbdPort,
                              qemuMigrationParamsPtr migParams,
                              unsigned long flags)
{
    unsigned short port = 0;
    bool autoPort = true;
    char *hostname = NULL;
    int ret = -1;
    virURIPtr uri = NULL;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    const char *migrateHost = cfg->migrateHost;

    VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, uri_in=%s, uri_out=%p, "
              "def=%p, origname=%s, listenAddress=%s, "
              "nmigrate_disks=%zu, migrate_disks=%p, nbdPort=%d, flags=0x%lx",
              driver, dconn, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, NULLSTR(uri_in), uri_out,
              *def, origname, NULLSTR(listenAddress),
              nmigrate_disks, migrate_disks, nbdPort, flags);

    *uri_out = NULL;

    /* The URI passed in may be NULL or a string "tcp://somehostname:port".
     *
     * If the URI passed in is NULL then we allocate a port number
     * from our pool of port numbers, and if the migrateHost is configured,
     * we return a URI of "tcp://migrateHost:port", otherwise return a URI
     * of "tcp://ourhostname:port".
     *
     * If the URI passed in is not NULL then we try to parse out the
     * port number and use that (note that the hostname is assumed
     * to be a correct hostname which refers to the target machine).
     */
    if (uri_in == NULL) {
        bool encloseAddress = false;
        const char *incFormat;

        if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
            goto cleanup;

        if (migrateHost != NULL) {
            if (virSocketAddrNumericFamily(migrateHost) == AF_INET6)
                encloseAddress = true;

            if (VIR_STRDUP(hostname, migrateHost) < 0)
                goto cleanup;
        } else {
            if ((hostname = virGetHostname()) == NULL)
                goto cleanup;
        }

        if (STRPREFIX(hostname, "localhost")) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("hostname on destination resolved to localhost,"
                             " but migration requires an FQDN"));
            goto cleanup;
        }

        /* XXX this really should have been a properly well-formed
         * URI, but we can't add in tcp:// now without breaking
         * compatibility with old targets. We at least make the
         * new targets accept both syntaxes though.
         */
        if (encloseAddress)
            incFormat = "%s:[%s]:%d";
        else
            incFormat = "%s:%s:%d";

        if (virAsprintf(uri_out, incFormat, "tcp", hostname, port) < 0)
            goto cleanup;
    } else {
        bool well_formed_uri;

        if (!(uri = qemuMigrationAnyParseURI(uri_in, &well_formed_uri)))
            goto cleanup;

        if (uri->scheme == NULL) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("missing scheme in migration URI: %s"),
                           uri_in);
            goto cleanup;
        }

        if (STRNEQ(uri->scheme, "tcp") &&
            STRNEQ(uri->scheme, "rdma")) {
            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED,
                           _("unsupported scheme %s in migration URI %s"),
                           uri->scheme, uri_in);
            goto cleanup;
        }

        if (uri->server == NULL) {
            virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration"
                                                  " URI: %s"), uri_in);
            goto cleanup;
        }

        if (uri->port == 0) {
            if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
                goto cleanup;

            /* Send well-formed URI only if uri_in was well-formed */
            if (well_formed_uri) {
                uri->port = port;
                if (!(*uri_out = virURIFormat(uri)))
                    goto cleanup;
            } else {
                if (virAsprintf(uri_out, "%s:%d", uri_in, port) < 0)
                    goto cleanup;
            }
        } else {
            port = uri->port;
            autoPort = false;
        }
    }

    if (*uri_out)
        VIR_DEBUG("Generated uri_out=%s", *uri_out);

    ret = qemuMigrationDstPrepareAny(driver, dconn, cookiein, cookieinlen,
                                     cookieout, cookieoutlen, def, origname,
                                     NULL, uri ? uri->scheme : "tcp",
                                     port, autoPort, listenAddress,
                                     nmigrate_disks, migrate_disks, nbdPort,
                                     migParams, flags);
 cleanup:
    virURIFree(uri);
    VIR_FREE(hostname);
    virObjectUnref(cfg);
    if (ret != 0) {
        VIR_FREE(*uri_out);
        if (autoPort)
            virPortAllocatorRelease(port);
    }
    return ret;
}


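/* Parse the domain XML sent by the source and optionally rename the domain
 * to @dname, handing the original name back through @origname. */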
virDomainDefPtr
qemuMigrationAnyPrepareDef(virQEMUDriverPtr driver,
                           const char *dom_xml,
                           const char *dname,
                           char **origname)
{
    virCapsPtr caps = NULL;
    virDomainDefPtr def;
    char *name = NULL;

    if (!dom_xml) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("no domain XML passed"));
        return NULL;
    }

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        return NULL;

    if (!(def = virDomainDefParseString(dom_xml, caps, driver->xmlopt, NULL,
                                        VIR_DOMAIN_DEF_PARSE_INACTIVE |
                                        VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE)))
        goto cleanup;

    if (dname) {
        name = def->name;
        if (VIR_STRDUP(def->name, dname) < 0) {
            virDomainDefFree(def);
            def = NULL;
        }
    }

 cleanup:
    virObjectUnref(caps);
    if (def && origname)
        *origname = name;
    else
        VIR_FREE(name);
    return def;
}


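/* Confirm phase on the source host: on success the domain is shut down and
 * audited as migrated; on failure CPUs are resumed (unless post-copy had
 * already started) and migration parameters are reset. */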
static int
qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver,
                             virDomainObjPtr vm,
                             const char *cookiein,
                             int cookieinlen,
                             unsigned int flags,
                             int retcode)
{
    qemuMigrationCookiePtr mig;
    virObjectEventPtr event;
    int rv = -1;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = NULL;

    VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "flags=0x%x, retcode=%d",
              driver, vm, NULLSTR(cookiein), cookieinlen,
              flags, retcode);

    virCheckFlags(QEMU_MIGRATION_FLAGS, -1);

    qemuMigrationJobSetPhase(driver, vm,
                             retcode == 0
                             ? QEMU_MIGRATION_PHASE_CONFIRM3
                             : QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED);

    if (!(mig = qemuMigrationEatCookie(driver, vm->def, priv->origname, priv,
                                       cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_STATS)))
        goto cleanup;

    if (retcode == 0)
        jobInfo = priv->job.completed;
    else
        VIR_FREE(priv->job.completed);

    /* Update times with the values sent by the destination daemon */
    if (mig->jobInfo && jobInfo) {
        int reason;

        /* We need to refresh migration statistics after a completed post-copy
         * migration since priv->job.completed contains obsolete data from the
         * time we switched to post-copy mode.
         */
        if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
            reason == VIR_DOMAIN_PAUSED_POSTCOPY &&
            qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                       jobInfo, NULL) < 0)
            VIR_WARN("Could not refresh migration statistics");

        qemuDomainJobInfoUpdateTime(jobInfo);
        jobInfo->timeDeltaSet = mig->jobInfo->timeDeltaSet;
        jobInfo->timeDelta = mig->jobInfo->timeDelta;
        jobInfo->stats.mig.downtime_set = mig->jobInfo->stats.mig.downtime_set;
        jobInfo->stats.mig.downtime = mig->jobInfo->stats.mig.downtime;
    }

    if (flags & VIR_MIGRATE_OFFLINE)
        goto done;

    /* Did the migration go as planned?  If yes, kill off the domain object.
     * If something failed, resume CPUs, but only if we didn't use post-copy.
     */
    if (retcode == 0) {
        /* If guest uses SPICE and supports seamless migration we have to hold
         * up domain shutdown until SPICE server transfers its data */
        qemuMigrationSrcWaitForSpice(vm);

        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
                        QEMU_ASYNC_JOB_MIGRATION_OUT,
                        VIR_QEMU_PROCESS_STOP_MIGRATED);
        virDomainAuditStop(vm, "migrated");

        event = virDomainEventLifecycleNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
        virObjectEventStateQueue(driver->domainEventState, event);
        qemuDomainEventEmitJobCompleted(driver, vm);
    } else {
        virErrorPtr orig_err = virSaveLastError();
        int reason;

        /* cancel any outstanding NBD jobs */
        qemuMigrationSrcNBDCopyCancel(driver, vm, false,
                                      QEMU_ASYNC_JOB_MIGRATION_OUT, NULL);

        virSetError(orig_err);
        virFreeError(orig_err);

        if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
            reason == VIR_DOMAIN_PAUSED_POSTCOPY)
            qemuMigrationAnyPostcopyFailed(driver, vm);
        else
            qemuMigrationSrcRestoreDomainState(driver, vm);

        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 priv->job.migParams, priv->job.apiFlags);

        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
            VIR_WARN("Failed to save status on vm %s", vm->def->name);
    }

3003
 done:
3004 3005 3006
    qemuMigrationCookieFree(mig);
    rv = 0;

3007
 cleanup:
3008 3009 3010 3011 3012
    virObjectUnref(cfg);
    return rv;
}

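/* Confirm phase entry point on the source host: pick the job phase according
 * to whether the migration was cancelled, run qemuMigrationSrcConfirmPhase()
 * and, if the domain ended up inactive and VIR_MIGRATE_UNDEFINE_SOURCE was
 * requested, remove its persistent configuration. */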
int
qemuMigrationSrcConfirm(virQEMUDriverPtr driver,
                        virDomainObjPtr vm,
                        const char *cookiein,
                        int cookieinlen,
                        unsigned int flags,
                        int cancelled)
{
    qemuMigrationJobPhase phase;
    virQEMUDriverConfigPtr cfg = NULL;
    int ret = -1;

    cfg = virQEMUDriverGetConfig(driver);

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
        goto cleanup;

    if (cancelled)
        phase = QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED;
    else
        phase = QEMU_MIGRATION_PHASE_CONFIRM3;

    qemuMigrationJobStartPhase(driver, vm, phase);
    virCloseCallbacksUnset(driver->closeCallbacks, vm,
                           qemuMigrationSrcCleanup);

    ret = qemuMigrationSrcConfirmPhase(driver, vm,
                                       cookiein, cookieinlen,
                                       flags, cancelled);

    qemuMigrationJobFinish(driver, vm);
    if (!virDomainObjIsActive(vm)) {
        if (!cancelled && ret == 0 && flags & VIR_MIGRATE_UNDEFINE_SOURCE) {
            virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
            vm->persistent = 0;
        }
        qemuDomainRemoveInactiveJob(driver, vm);
    }

 cleanup:
    virDomainObjEndAPI(&vm);
    virObjectUnref(cfg);
    return ret;
}


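/* Description of where the migration data is sent and how it gets there:
 * QEMU may be told to connect to a host:port itself or be handed an already
 * connected FD, and libvirt either lets QEMU talk to the destination
 * directly (MIGRATION_FWD_DIRECT) or reads the data from a local FD and
 * forwards it over a virStream (MIGRATION_FWD_STREAM). */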
enum qemuMigrationDestinationType {
    MIGRATION_DEST_HOST,
    MIGRATION_DEST_CONNECT_HOST,
    MIGRATION_DEST_FD,
};

enum qemuMigrationForwardType {
    MIGRATION_FWD_DIRECT,
    MIGRATION_FWD_STREAM,
};

typedef struct _qemuMigrationSpec qemuMigrationSpec;
typedef qemuMigrationSpec *qemuMigrationSpecPtr;
struct _qemuMigrationSpec {
    enum qemuMigrationDestinationType destType;
    union {
        struct {
            const char *protocol;
            const char *name;
            int port;
        } host;

        struct {
            int qemu;
            int local;
        } fd;
    } dest;

    enum qemuMigrationForwardType fwdType;
    union {
        virStreamPtr stream;
    } fwd;
};

#define TUNNEL_SEND_BUF_SIZE 65536

typedef struct _qemuMigrationIOThread qemuMigrationIOThread;
typedef qemuMigrationIOThread *qemuMigrationIOThreadPtr;
struct _qemuMigrationIOThread {
    virThread thread;
    virStreamPtr st;
    int sock;
    virError err;
    int wakeupRecvFD;
    int wakeupSendFD;
};

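/* Body of the tunnel I/O thread: poll the QEMU migration socket and the
 * wakeup pipe, copy data from the socket onto the stream in
 * TUNNEL_SEND_BUF_SIZE chunks, and finish or abort the stream depending on
 * how the transfer ends. */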
static void qemuMigrationSrcIOFunc(void *arg)
{
    qemuMigrationIOThreadPtr data = arg;
    char *buffer = NULL;
    struct pollfd fds[2];
    int timeout = -1;
    virErrorPtr err = NULL;

    VIR_DEBUG("Running migration tunnel; stream=%p, sock=%d",
              data->st, data->sock);

    if (VIR_ALLOC_N(buffer, TUNNEL_SEND_BUF_SIZE) < 0)
        goto abrt;

    fds[0].fd = data->sock;
    fds[1].fd = data->wakeupRecvFD;

    for (;;) {
        int ret;

        fds[0].events = fds[1].events = POLLIN;
        fds[0].revents = fds[1].revents = 0;

        ret = poll(fds, ARRAY_CARDINALITY(fds), timeout);

        if (ret < 0) {
            if (errno == EAGAIN || errno == EINTR)
                continue;
            virReportSystemError(errno, "%s",
                                 _("poll failed in migration tunnel"));
            goto abrt;
        }

        if (ret == 0) {
            /* We were asked to gracefully stop but reading would block. This
             * can only happen if qemu told us migration finished but didn't
             * close the migration fd. We handle this in the same way as EOF.
             */
            VIR_DEBUG("QEMU forgot to close migration fd");
            break;
        }

        if (fds[1].revents & (POLLIN | POLLERR | POLLHUP)) {
            char stop = 0;

            if (saferead(data->wakeupRecvFD, &stop, 1) != 1) {
                virReportSystemError(errno, "%s",
                                     _("failed to read from wakeup fd"));
                goto abrt;
            }

            VIR_DEBUG("Migration tunnel was asked to %s",
                      stop ? "abort" : "finish");
            if (stop) {
                goto abrt;
            } else {
                timeout = 0;
            }
        }

        if (fds[0].revents & (POLLIN | POLLERR | POLLHUP)) {
            int nbytes;

            nbytes = saferead(data->sock, buffer, TUNNEL_SEND_BUF_SIZE);
            if (nbytes > 0) {
                if (virStreamSend(data->st, buffer, nbytes) < 0)
                    goto error;
            } else if (nbytes < 0) {
                virReportSystemError(errno, "%s",
                        _("tunnelled migration failed to read from qemu"));
                goto abrt;
            } else {
                /* EOF; get out of here */
                break;
            }
        }
    }

    if (virStreamFinish(data->st) < 0)
        goto error;

    VIR_FORCE_CLOSE(data->sock);
    VIR_FREE(buffer);

    return;

 abrt:
    err = virSaveLastError();
    if (err && err->code == VIR_ERR_OK) {
        virFreeError(err);
        err = NULL;
    }
    virStreamAbort(data->st);
    if (err) {
        virSetError(err);
        virFreeError(err);
    }

 error:
    /* Let the source qemu know that the transfer can't continue anymore.
     * Don't copy the error for EPIPE as destination has the actual error. */
    VIR_FORCE_CLOSE(data->sock);
    if (!virLastErrorIsSystemErrno(EPIPE))
        virCopyLastError(&data->err);
    virResetLastError();
    VIR_FREE(buffer);
}


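/* Allocate the tunnel I/O thread data, create the wakeup pipe and spawn
 * qemuMigrationSrcIOFunc copying data from @sock onto @st. Returns NULL on
 * failure. */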
static qemuMigrationIOThreadPtr
qemuMigrationSrcStartTunnel(virStreamPtr st,
                            int sock)
{
    qemuMigrationIOThreadPtr io = NULL;
    int wakeupFD[2] = { -1, -1 };

    if (pipe2(wakeupFD, O_CLOEXEC) < 0) {
        virReportSystemError(errno, "%s",
                             _("Unable to make pipe"));
        goto error;
    }

    if (VIR_ALLOC(io) < 0)
        goto error;

    io->st = st;
    io->sock = sock;
    io->wakeupRecvFD = wakeupFD[0];
    io->wakeupSendFD = wakeupFD[1];

    if (virThreadCreate(&io->thread, true,
                        qemuMigrationSrcIOFunc,
                        io) < 0) {
        virReportSystemError(errno, "%s",
                             _("Unable to create migration thread"));
        goto error;
    }

    return io;

 error:
    VIR_FORCE_CLOSE(wakeupFD[0]);
    VIR_FORCE_CLOSE(wakeupFD[1]);
    VIR_FREE(io);
    return NULL;
}

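/* Ask the tunnel I/O thread to finish (or abort when @error is true), join
 * it and propagate any error it recorded; the thread data is freed in all
 * cases. */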
static int
qemuMigrationSrcStopTunnel(qemuMigrationIOThreadPtr io, bool error)
{
    int rv = -1;
    char stop = error ? 1 : 0;

    /* make sure the thread finishes its job and is joinable */
    if (safewrite(io->wakeupSendFD, &stop, 1) != 1) {
        virReportSystemError(errno, "%s",
                             _("failed to wakeup migration tunnel"));
        goto cleanup;
    }

    virThreadJoin(&io->thread);

    /* Forward error from the IO thread, to this thread */
    if (io->err.code != VIR_ERR_OK) {
        if (error)
            rv = 0;
        else
            virSetError(&io->err);
        virResetError(&io->err);
        goto cleanup;
    }

    rv = 0;

 cleanup:
    VIR_FORCE_CLOSE(io->wakeupSendFD);
    VIR_FORCE_CLOSE(io->wakeupRecvFD);
    VIR_FREE(io);
    return rv;
}

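/* Open the TCP connection to the destination QEMU ourselves (with the socket
 * labelled for the domain) and turn the spec into MIGRATION_DEST_FD so the
 * connected, blocking FD can be handed to QEMU. */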
static int
qemuMigrationSrcConnect(virQEMUDriverPtr driver,
                        virDomainObjPtr vm,
                        qemuMigrationSpecPtr spec)
{
    virNetSocketPtr sock;
    const char *host;
    char *port = NULL;
    int ret = -1;

    host = spec->dest.host.name;
    if (virAsprintf(&port, "%d", spec->dest.host.port) < 0)
        return -1;

    spec->destType = MIGRATION_DEST_FD;
    spec->dest.fd.qemu = -1;

    if (qemuSecuritySetSocketLabel(driver->securityManager, vm->def) < 0)
        goto cleanup;
    if (virNetSocketNewConnectTCP(host, port,
                                  AF_UNSPEC,
                                  &sock) == 0) {
        spec->dest.fd.qemu = virNetSocketDupFD(sock, true);
        virObjectUnref(sock);
    }
    if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0 ||
        spec->dest.fd.qemu == -1)
        goto cleanup;

    /* Migration expects a blocking FD */
    if (virSetBlocking(spec->dest.fd.qemu, true) < 0) {
        virReportSystemError(errno, _("Unable to set FD %d blocking"),
                             spec->dest.fd.qemu);
        goto cleanup;
    }

    ret = 0;

 cleanup:
    VIR_FREE(port);
    if (ret < 0)
        VIR_FORCE_CLOSE(spec->dest.fd.qemu);
    return ret;
}

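/* Issue migrate-continue on the monitor to resume a migration that QEMU has
 * paused in the given @status (e.g. pre-switchover). */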
static int
qemuMigrationSrcContinue(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         qemuMonitorMigrationStatus status,
                         qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    ret = qemuMonitorMigrateContinue(priv->mon, status);

    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        ret = -1;

    return ret;
}


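/* Common body of the Perform phase on the source: apply migration
 * parameters, TLS and NBD storage copy as requested, start the migration
 * according to @spec, optionally run the tunnel I/O thread, and wait for
 * QEMU to complete (including the pre-switchover pause) before baking the
 * cookie for the Finish phase. */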
static int
qemuMigrationSrcRun(virQEMUDriverPtr driver,
                    virDomainObjPtr vm,
                    const char *persist_xml,
                    const char *cookiein,
                    int cookieinlen,
                    char **cookieout,
                    int *cookieoutlen,
                    unsigned long flags,
                    unsigned long resource,
                    qemuMigrationSpecPtr spec,
                    virConnectPtr dconn,
                    const char *graphicsuri,
                    size_t nmigrate_disks,
                    const char **migrate_disks,
                    qemuMigrationParamsPtr migParams)
{
    int ret = -1;
    unsigned int migrate_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuMigrationCookiePtr mig = NULL;
    char *tlsAlias = NULL;
    qemuMigrationIOThreadPtr iothread = NULL;
    int fd = -1;
    unsigned long migrate_speed = resource ? resource : priv->migMaxBandwidth;
    virErrorPtr orig_err = NULL;
    unsigned int cookieFlags = 0;
    bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);
    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
    bool cancel = false;
    unsigned int waitFlags;
    virDomainDefPtr persistDef = NULL;
    char *timestamp;
    int rc;

    VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, resource=%lu, "
              "spec=%p (dest=%d, fwd=%d), dconn=%p, graphicsuri=%s, "
              "nmigrate_disks=%zu, migrate_disks=%p",
              driver, vm, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, resource,
              spec, spec->destType, spec->fwdType, dconn,
              NULLSTR(graphicsuri), nmigrate_disks, migrate_disks);

    if (flags & VIR_MIGRATE_NON_SHARED_DISK) {
        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (flags & VIR_MIGRATE_NON_SHARED_INC) {
        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (virLockManagerPluginUsesState(driver->lockManager) &&
        !cookieout) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Migration with lock driver %s requires"
                         " cookie support"),
                       virLockManagerPluginGetName(driver->lockManager));
        return -1;
    }

    if (events)
        priv->signalIOError = abort_on_error;

    if (flags & VIR_MIGRATE_PERSIST_DEST) {
        if (persist_xml) {
            if (!(persistDef = qemuMigrationAnyPrepareDef(driver, persist_xml,
                                                          NULL, NULL)))
                goto error;
        } else {
            virDomainDefPtr def = vm->newDef ? vm->newDef : vm->def;
            if (!(persistDef = qemuDomainDefCopy(driver, def,
                                                 VIR_DOMAIN_XML_SECURE |
                                                 VIR_DOMAIN_XML_MIGRATABLE)))
                goto error;
        }
    }

    mig = qemuMigrationEatCookie(driver, vm->def, priv->origname, priv,
                                 cookiein, cookieinlen,
                                 cookieFlags |
                                 QEMU_MIGRATION_COOKIE_GRAPHICS |
                                 QEMU_MIGRATION_COOKIE_CAPS);
    if (!mig)
        goto error;

    if (qemuMigrationSrcGraphicsRelocate(driver, vm, mig, graphicsuri) < 0)
        VIR_WARN("unable to provide data for graphics client relocation");

    if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 migParams, mig->caps->automatic) < 0)
        goto error;

    if (flags & VIR_MIGRATE_TLS) {
        const char *hostname = NULL;

        /* We need to add tls-hostname whenever QEMU itself does not
         * connect directly to the destination. */
        if (spec->destType == MIGRATION_DEST_CONNECT_HOST ||
            spec->destType == MIGRATION_DEST_FD)
            hostname = spec->dest.host.name;

        if (qemuMigrationParamsEnableTLS(driver, vm, false,
                                         QEMU_ASYNC_JOB_MIGRATION_OUT,
                                         &tlsAlias, hostname,
                                         migParams) < 0)
            goto error;
    } else {
        if (qemuMigrationParamsDisableTLS(vm, migParams) < 0)
            goto error;
    }

    if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 migParams) < 0)
        goto error;

    if (migrate_flags & (QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
                         QEMU_MONITOR_MIGRATE_NON_SHARED_INC)) {
        if (mig->nbd) {
            /* Currently libvirt does not support setting up of the NBD
             * non-shared storage migration with TLS. As we need to honour the
             * VIR_MIGRATE_TLS flag, we need to reject such migration until
             * we implement TLS for NBD. */
            if (flags & VIR_MIGRATE_TLS &&
                !virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_DEL)) {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("NBD migration with TLS is not supported"));
                goto error;
            }

            /* This will update migrate_flags on success */
            if (qemuMigrationSrcNBDStorageCopy(driver, vm, mig,
                                               spec->dest.host.name,
                                               migrate_speed,
                                               &migrate_flags,
                                               nmigrate_disks,
                                               migrate_disks,
                                               dconn, tlsAlias, flags) < 0) {
                goto error;
            }
        } else {
            /* Destination doesn't support NBD server.
             * Fall back to previous implementation. */
            VIR_DEBUG("Destination doesn't support NBD server. "
                      "Falling back to previous implementation.");
        }
    }

    /* Before EnterMonitor, since qemuProcessStopCPUs already does that */
    if (!(flags & VIR_MIGRATE_LIVE) &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION,
                                QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            goto error;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
        goto error;

    if (priv->job.abortJob) {
        /* explicitly do this *after* we entered the monitor,
         * as this is a critical section so we are guaranteed
         * priv->job.abortJob will not change */
        priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
        virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                       qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                       _("canceled by client"));
        goto exit_monitor;
    }

    if (qemuMonitorSetMigrationSpeed(priv->mon, migrate_speed) < 0)
        goto exit_monitor;

    /* connect to the destination qemu if needed */
    if (spec->destType == MIGRATION_DEST_CONNECT_HOST &&
        qemuMigrationSrcConnect(driver, vm, spec) < 0) {
        goto exit_monitor;
    }

    /* log start of migration */
    if ((timestamp = virTimeStringNow()) != NULL) {
        qemuDomainLogAppendMessage(driver, vm, "%s: initiating migration\n", timestamp);
        VIR_FREE(timestamp);
    }

    rc = -1;
    switch (spec->destType) {
    case MIGRATION_DEST_HOST:
        if (STREQ(spec->dest.host.protocol, "rdma") &&
            virProcessSetMaxMemLock(vm->pid, vm->def->mem.hard_limit << 10) < 0) {
            goto exit_monitor;
        }
        rc = qemuMonitorMigrateToHost(priv->mon, migrate_flags,
                                      spec->dest.host.protocol,
                                      spec->dest.host.name,
                                      spec->dest.host.port);
        break;

    case MIGRATION_DEST_CONNECT_HOST:
        /* handled above and transformed into MIGRATION_DEST_FD */
        break;

    case MIGRATION_DEST_FD:
        if (spec->fwdType != MIGRATION_FWD_DIRECT) {
            fd = spec->dest.fd.local;
            spec->dest.fd.local = -1;
        }
        rc = qemuMonitorMigrateToFd(priv->mon, migrate_flags,
                                    spec->dest.fd.qemu);
        VIR_FORCE_CLOSE(spec->dest.fd.qemu);
        break;
    }

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0)
        goto error;

    /* From this point onwards we *must* call cancel to abort the
     * migration on source if anything goes wrong */
    cancel = true;

    if (spec->fwdType != MIGRATION_FWD_DIRECT) {
        if (!(iothread = qemuMigrationSrcStartTunnel(spec->fwd.stream, fd)))
            goto error;
        /* If we've created a tunnel, then the 'fd' will be closed in the
         * qemuMigrationIOFunc as data->sock.
         */
        fd = -1;
    }

    waitFlags = QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;
    if (abort_on_error)
        waitFlags |= QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR;
    if (mig->nbd)
        waitFlags |= QEMU_MIGRATION_COMPLETED_CHECK_STORAGE;
    if (flags & VIR_MIGRATE_POSTCOPY)
        waitFlags |= QEMU_MIGRATION_COMPLETED_POSTCOPY;

    rc = qemuMigrationSrcWaitForCompletion(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_OUT,
                                           dconn, waitFlags);
    if (rc == -2) {
        goto error;
    } else if (rc == -1) {
        /* QEMU reported failed migration, nothing to cancel anymore */
        cancel = false;
        goto error;
    }

    /* When migration completed, QEMU will have paused the CPUs for us.
     * Wait for the STOP event to be processed or explicitly stop CPUs
     * (for old QEMU which does not send events) to release the lock state.
     */
    if (priv->monJSON) {
        while (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
            priv->signalStop = true;
            rc = virDomainObjWait(vm);
            priv->signalStop = false;
            if (rc < 0)
                goto error;
        }
    } else if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
               qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION,
                                   QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
        goto error;
    }

    if (mig->nbd &&
        qemuMigrationSrcNBDCopyCancel(driver, vm, true,
                                      QEMU_ASYNC_JOB_MIGRATION_OUT,
                                      dconn) < 0)
        goto error;

    /* When migration was paused before serializing device state we need to
     * resume it now once we finished all block jobs and wait for the real
     * end of the migration.
     */
    if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) {
        if (qemuMigrationSrcContinue(driver, vm,
                                     QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
                                     QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            goto error;

        waitFlags ^= QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;

        rc = qemuMigrationSrcWaitForCompletion(driver, vm,
                                               QEMU_ASYNC_JOB_MIGRATION_OUT,
                                               dconn, waitFlags);
        if (rc == -2) {
            goto error;
        } else if (rc == -1) {
            /* QEMU reported failed migration, nothing to cancel anymore */
            cancel = false;
            goto error;
        }
    }

    if (iothread) {
        qemuMigrationIOThreadPtr io;

        VIR_STEAL_PTR(io, iothread);
        if (qemuMigrationSrcStopTunnel(io, false) < 0)
            goto error;
    }

    if (priv->job.completed) {
        priv->job.completed->stopped = priv->job.current->stopped;
        qemuDomainJobInfoUpdateTime(priv->job.completed);
        qemuDomainJobInfoUpdateDowntime(priv->job.completed);
        ignore_value(virTimeMillisNow(&priv->job.completed->sent));
    }

    cookieFlags |= QEMU_MIGRATION_COOKIE_NETWORK |
                   QEMU_MIGRATION_COOKIE_STATS;

    if (qemuMigrationCookieAddPersistent(mig, &persistDef) < 0 ||
        qemuMigrationBakeCookie(mig, driver, vm,
                                QEMU_MIGRATION_SOURCE,
                                cookieout, cookieoutlen, cookieFlags) < 0) {
        VIR_WARN("Unable to encode migration cookie");
    }

    ret = 0;

 cleanup:
    VIR_FREE(tlsAlias);
    VIR_FORCE_CLOSE(fd);
    virDomainDefFree(persistDef);
    qemuMigrationCookieFree(mig);

    if (events)
        priv->signalIOError = false;

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

    return ret;

 error:
    orig_err = virSaveLastError();

    if (virDomainObjIsActive(vm)) {
        if (cancel &&
            priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED &&
            qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
            qemuMonitorMigrateCancel(priv->mon);
            ignore_value(qemuDomainObjExitMonitor(driver, vm));
        }

        /* cancel any outstanding NBD jobs */
        if (mig && mig->nbd)
            qemuMigrationSrcNBDCopyCancel(driver, vm, false,
                                          QEMU_ASYNC_JOB_MIGRATION_OUT,
                                          dconn);

        if (priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED)
            priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
    }

    if (iothread)
        qemuMigrationSrcStopTunnel(iothread, true);

    goto cleanup;

 exit_monitor:
    ignore_value(qemuDomainObjExitMonitor(driver, vm));
    goto error;
}

/* Perform migration using QEMU's native migrate support,
 * not encrypted obviously
 */
static int
qemuMigrationSrcPerformNative(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              const char *persist_xml,
                              const char *uri,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              unsigned long flags,
                              unsigned long resource,
                              virConnectPtr dconn,
                              const char *graphicsuri,
                              size_t nmigrate_disks,
                              const char **migrate_disks,
                              qemuMigrationParamsPtr migParams)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virURIPtr uribits = NULL;
    int ret = -1;
    qemuMigrationSpec spec;

    VIR_DEBUG("driver=%p, vm=%p, uri=%s, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, resource=%lu, "
              "graphicsuri=%s, nmigrate_disks=%zu, migrate_disks=%p",
              driver, vm, uri, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, resource,
              NULLSTR(graphicsuri), nmigrate_disks, migrate_disks);

    if (!(uribits = qemuMigrationAnyParseURI(uri, NULL)))
        return -1;

    if (uribits->scheme == NULL) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("missing scheme in migration URI: %s"),
                       uri);
        goto cleanup;
    }

    if (STREQ(uribits->scheme, "rdma")) {
        if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_RDMA)) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("outgoing RDMA migration is not supported "
                             "with this QEMU binary"));
            goto cleanup;
        }
        if (!virMemoryLimitIsSet(vm->def->mem.hard_limit)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("cannot start RDMA migration with no memory hard "
                             "limit set"));
            goto cleanup;
        }
    }

    /* RDMA and multi-fd migration require QEMU to connect to the destination
     * itself.
     */
    if (STREQ(uribits->scheme, "rdma") || (flags & VIR_MIGRATE_PARALLEL))
        spec.destType = MIGRATION_DEST_HOST;
    else
        spec.destType = MIGRATION_DEST_CONNECT_HOST;
    spec.dest.host.protocol = uribits->scheme;
    spec.dest.host.name = uribits->server;
    spec.dest.host.port = uribits->port;
    spec.fwdType = MIGRATION_FWD_DIRECT;

    ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen, cookieout,
                              cookieoutlen, flags, resource, &spec, dconn,
                              graphicsuri, nmigrate_disks, migrate_disks,
                              migParams);

    if (spec.destType == MIGRATION_DEST_FD)
        VIR_FORCE_CLOSE(spec.dest.fd.qemu);

 cleanup:
    virURIFree(uribits);

    return ret;
}


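/* Perform migration over a libvirt stream: QEMU writes the migration data
 * into an anonymous pipe and the tunnel I/O thread forwards it onto @st. */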
static int
qemuMigrationSrcPerformTunnel(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              virStreamPtr st,
                              const char *persist_xml,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              unsigned long flags,
                              unsigned long resource,
                              virConnectPtr dconn,
                              const char *graphicsuri,
                              size_t nmigrate_disks,
                              const char **migrate_disks,
                              qemuMigrationParamsPtr migParams)
{
    int ret = -1;
    qemuMigrationSpec spec;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    int fds[2] = { -1, -1 };

    VIR_DEBUG("driver=%p, vm=%p, st=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, resource=%lu, "
              "graphicsuri=%s, nmigrate_disks=%zu, migrate_disks=%p",
              driver, vm, st, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, resource,
              NULLSTR(graphicsuri), nmigrate_disks, migrate_disks);

    spec.fwdType = MIGRATION_FWD_STREAM;
    spec.fwd.stream = st;

    spec.destType = MIGRATION_DEST_FD;
    spec.dest.fd.qemu = -1;
    spec.dest.fd.local = -1;

    if (pipe2(fds, O_CLOEXEC) == 0) {
        spec.dest.fd.qemu = fds[1];
        spec.dest.fd.local = fds[0];
    }
    if (spec.dest.fd.qemu == -1 ||
        qemuSecuritySetImageFDLabel(driver->securityManager, vm->def,
                                    spec.dest.fd.qemu) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot create pipe for tunnelled migration"));
        goto cleanup;
    }

    ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen,
                              cookieout, cookieoutlen, flags, resource, &spec,
                              dconn, graphicsuri, nmigrate_disks, migrate_disks,
                              migParams);

 cleanup:
    VIR_FORCE_CLOSE(spec.dest.fd.qemu);
    VIR_FORCE_CLOSE(spec.dest.fd.local);

    virObjectUnref(cfg);
    return ret;
}


/* This is essentially a re-impl of virDomainMigrateVersion2
 * from libvirt.c, but running in source libvirtd context,
 * instead of client app context & also adding in tunnel
 * handling */
static int
qemuMigrationSrcPerformPeer2Peer2(virQEMUDriverPtr driver,
                                  virConnectPtr sconn,
                                  virConnectPtr dconn,
                                  virDomainObjPtr vm,
                                  const char *dconnuri,
                                  unsigned long flags,
                                  const char *dname,
                                  unsigned long resource,
                                  qemuMigrationParamsPtr migParams)
{
    virDomainPtr ddomain = NULL;
    char *uri_out = NULL;
    char *cookie = NULL;
    char *dom_xml = NULL;
    int cookielen = 0, ret;
    virErrorPtr orig_err = NULL;
    bool cancelled;
    virStreamPtr st = NULL;
    unsigned long destflags;

    VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, vm=%p, dconnuri=%s, "
              "flags=0x%lx, dname=%s, resource=%lu",
              driver, sconn, dconn, vm, NULLSTR(dconnuri),
              flags, NULLSTR(dname), resource);

    /* In version 2 of the protocol, the prepare step is slightly
     * different.  We fetch the domain XML of the source domain
     * and pass it to Prepare2.
     */
    if (!(dom_xml = qemuDomainFormatXML(driver, vm,
                                        QEMU_DOMAIN_FORMAT_LIVE_FLAGS |
                                        VIR_DOMAIN_XML_MIGRATABLE)))
        return -1;

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
        flags |= VIR_MIGRATE_PAUSED;

    destflags = flags & ~(VIR_MIGRATE_ABORT_ON_ERROR |
                          VIR_MIGRATE_AUTO_CONVERGE);

    VIR_DEBUG("Prepare2 %p", dconn);
    if (flags & VIR_MIGRATE_TUNNELLED) {
        /*
         * Tunnelled Migrate Version 2 does not support cookies
         * due to missing parameters in the prepareTunnel() API.
         */

        if (!(st = virStreamNew(dconn, 0)))
            goto cleanup;

        qemuDomainObjEnterRemote(vm);
        ret = dconn->driver->domainMigratePrepareTunnel
            (dconn, st, destflags, dname, resource, dom_xml);
        if (qemuDomainObjExitRemote(vm, true) < 0)
            goto cleanup;
    } else {
        qemuDomainObjEnterRemote(vm);
        ret = dconn->driver->domainMigratePrepare2
            (dconn, &cookie, &cookielen, NULL, &uri_out,
             destflags, dname, resource, dom_xml);
        if (qemuDomainObjExitRemote(vm, true) < 0)
            goto cleanup;
    }
    VIR_FREE(dom_xml);
    if (ret == -1)
        goto cleanup;

    if (!(flags & VIR_MIGRATE_TUNNELLED) &&
        (uri_out == NULL)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domainMigratePrepare2 did not set uri"));
        cancelled = true;
        orig_err = virSaveLastError();
        goto finish;
    }

    /* Perform the migration.  The driver isn't supposed to return
     * until the migration is complete.
     */
    VIR_DEBUG("Perform %p", sconn);
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
    if (flags & VIR_MIGRATE_TUNNELLED)
        ret = qemuMigrationSrcPerformTunnel(driver, vm, st, NULL,
                                            NULL, 0, NULL, NULL,
                                            flags, resource, dconn,
                                            NULL, 0, NULL, migParams);
    else
        ret = qemuMigrationSrcPerformNative(driver, vm, NULL, uri_out,
                                            cookie, cookielen,
                                            NULL, NULL, /* No out cookie with v2 migration */
                                            flags, resource, dconn, NULL, 0, NULL,
                                            migParams);

    /* Perform failed. Make sure Finish doesn't overwrite the error */
    if (ret < 0)
        orig_err = virSaveLastError();

    /* If Perform returns < 0, then we need to cancel the VM
     * startup on the destination
     */
    cancelled = ret < 0;

 finish:
    /* In version 2 of the migration protocol, we pass the
     * status code from the sender to the destination host,
     * so it can do any cleanup if the migration failed.
     */
    dname = dname ? dname : vm->def->name;
    VIR_DEBUG("Finish2 %p ret=%d", dconn, ret);
    qemuDomainObjEnterRemote(vm);
    ddomain = dconn->driver->domainMigrateFinish2
        (dconn, dname, cookie, cookielen,
         uri_out ? uri_out : dconnuri, destflags, cancelled);
    /* The domain is already gone at this point */
    ignore_value(qemuDomainObjExitRemote(vm, false));
    if (cancelled && ddomain)
        VIR_ERROR(_("finish step ignored that migration was cancelled"));

 cleanup:
    if (ddomain) {
        virObjectUnref(ddomain);
        ret = 0;
    } else {
        ret = -1;
    }

    virObjectUnref(st);

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    VIR_FREE(uri_out);
    VIR_FREE(cookie);

    return ret;
}


/* This is essentially a re-impl of virDomainMigrateVersion3
 * from libvirt.c, but running in source libvirtd context,
 * instead of client app context & also adding in tunnel
 * handling */
static int
qemuMigrationSrcPerformPeer2Peer3(virQEMUDriverPtr driver,
                                  virConnectPtr sconn,
                                  virConnectPtr dconn,
                                  const char *dconnuri,
                                  virDomainObjPtr vm,
                                  const char *xmlin,
                                  const char *persist_xml,
                                  const char *dname,
                                  const char *uri,
                                  const char *graphicsuri,
                                  const char *listenAddress,
                                  size_t nmigrate_disks,
                                  const char **migrate_disks,
                                  int nbdPort,
                                  qemuMigrationParamsPtr migParams,
                                  unsigned long long bandwidth,
                                  bool useParams,
                                  unsigned long flags)
{
    virDomainPtr ddomain = NULL;
    char *uri_out = NULL;
    char *cookiein = NULL;
    char *cookieout = NULL;
    char *dom_xml = NULL;
    int cookieinlen = 0;
    int cookieoutlen = 0;
    int ret = -1;
    virErrorPtr orig_err = NULL;
    bool cancelled = true;
    virStreamPtr st = NULL;
    unsigned long destflags;
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int maxparams = 0;
    size_t i;
    bool offline = !!(flags & VIR_MIGRATE_OFFLINE);

    VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, dconnuri=%s, vm=%p, xmlin=%s, "
              "dname=%s, uri=%s, graphicsuri=%s, listenAddress=%s, "
              "nmigrate_disks=%zu, migrate_disks=%p, nbdPort=%d, "
              "bandwidth=%llu, useParams=%d, flags=0x%lx",
              driver, sconn, dconn, NULLSTR(dconnuri), vm, NULLSTR(xmlin),
              NULLSTR(dname), NULLSTR(uri), NULLSTR(graphicsuri),
              NULLSTR(listenAddress), nmigrate_disks, migrate_disks, nbdPort,
              bandwidth, useParams, flags);

    /* Unlike the virDomainMigrateVersion3 counterpart, we don't need
     * to worry about auto-setting the VIR_MIGRATE_CHANGE_PROTECTION
     * bit here, because we are already running inside the context of
     * a single job.  */

    dom_xml = qemuMigrationSrcBeginPhase(driver, vm, xmlin, dname,
                                         &cookieout, &cookieoutlen,
                                         nmigrate_disks, migrate_disks, flags);
    if (!dom_xml)
        goto cleanup;

    if (useParams) {
        if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_DEST_XML, dom_xml) < 0)
            goto cleanup;

        if (dname &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_DEST_NAME, dname) < 0)
            goto cleanup;

        if (uri &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_URI, uri) < 0)
            goto cleanup;

        if (bandwidth &&
            virTypedParamsAddULLong(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_BANDWIDTH,
                                    bandwidth) < 0)
            goto cleanup;

        if (graphicsuri &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_GRAPHICS_URI,
                                    graphicsuri) < 0)
            goto cleanup;
        if (listenAddress &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_LISTEN_ADDRESS,
                                    listenAddress) < 0)
            goto cleanup;
        for (i = 0; i < nmigrate_disks; i++)
            if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                        VIR_MIGRATE_PARAM_MIGRATE_DISKS,
                                        migrate_disks[i]) < 0)
                goto cleanup;
        if (nbdPort &&
            virTypedParamsAddInt(&params, &nparams, &maxparams,
                                 VIR_MIGRATE_PARAM_DISKS_PORT,
                                 nbdPort) < 0)
            goto cleanup;

        if (qemuMigrationParamsDump(migParams, &params, &nparams,
                                    &maxparams, &flags) < 0)
            goto cleanup;
    }

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
        flags |= VIR_MIGRATE_PAUSED;

    destflags = flags & ~(VIR_MIGRATE_ABORT_ON_ERROR |
                          VIR_MIGRATE_AUTO_CONVERGE);

    VIR_DEBUG("Prepare3 %p", dconn);
    VIR_STEAL_PTR(cookiein, cookieout);
    cookieinlen = cookieoutlen;
    cookieoutlen = 0;
    if (flags & VIR_MIGRATE_TUNNELLED) {
        if (!(st = virStreamNew(dconn, 0)))
            goto cleanup;

        qemuDomainObjEnterRemote(vm);
        if (useParams) {
            ret = dconn->driver->domainMigratePrepareTunnel3Params
                (dconn, st, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, destflags);
        } else {
            ret = dconn->driver->domainMigratePrepareTunnel3
                (dconn, st, cookiein, cookieinlen, &cookieout, &cookieoutlen,
                 destflags, dname, bandwidth, dom_xml);
        }
        if (qemuDomainObjExitRemote(vm, !offline) < 0)
            goto cleanup;
    } else {
        qemuDomainObjEnterRemote(vm);
        if (useParams) {
            ret = dconn->driver->domainMigratePrepare3Params
                (dconn, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, &uri_out, destflags);
        } else {
            ret = dconn->driver->domainMigratePrepare3
                (dconn, cookiein, cookieinlen, &cookieout, &cookieoutlen,
                 uri, &uri_out, destflags, dname, bandwidth, dom_xml);
        }
        if (qemuDomainObjExitRemote(vm, !offline) < 0)
            goto cleanup;
    }
    VIR_FREE(dom_xml);
    if (ret == -1)
        goto cleanup;

    if (offline) {
        VIR_DEBUG("Offline migration, skipping Perform phase");
        VIR_FREE(cookieout);
        cookieoutlen = 0;
        cancelled = false;
        goto finish;
    }

    if (uri_out) {
        uri = uri_out;
        if (useParams &&
            virTypedParamsReplaceString(&params, &nparams,
                                        VIR_MIGRATE_PARAM_URI, uri_out) < 0) {
            orig_err = virSaveLastError();
            goto finish;
        }
    } else if (!uri && !(flags & VIR_MIGRATE_TUNNELLED)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domainMigratePrepare3 did not set uri"));
        orig_err = virSaveLastError();
        goto finish;
    }

    /* Perform the migration.  The driver isn't supposed to return
     * until the migration is complete. The src VM should remain
     * running, but in paused state until the destination can
     * confirm migration completion.
     */
    VIR_DEBUG("Perform3 %p uri=%s", sconn, NULLSTR(uri));
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
    VIR_FREE(cookiein);
    VIR_STEAL_PTR(cookiein, cookieout);
    cookieinlen = cookieoutlen;
    cookieoutlen = 0;
    if (flags & VIR_MIGRATE_TUNNELLED) {
        ret = qemuMigrationSrcPerformTunnel(driver, vm, st, persist_xml,
                                            cookiein, cookieinlen,
                                            &cookieout, &cookieoutlen,
                                            flags, bandwidth, dconn, graphicsuri,
                                            nmigrate_disks, migrate_disks,
                                            migParams);
    } else {
        ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri,
                                            cookiein, cookieinlen,
                                            &cookieout, &cookieoutlen,
                                            flags, bandwidth, dconn, graphicsuri,
                                            nmigrate_disks, migrate_disks,
                                            migParams);
    }

    /* Perform failed. Make sure Finish doesn't overwrite the error */
    if (ret < 0) {
        orig_err = virSaveLastError();
    } else {
        qemuMigrationJobSetPhase(driver, vm,
                                 QEMU_MIGRATION_PHASE_PERFORM3_DONE);
    }

    /* If Perform returns < 0, then we need to cancel the VM
     * startup on the destination
     */
    cancelled = ret < 0;

 finish:
    /*
     * The status code from the source is passed to the destination.
     * The dest can clean up if the source indicated it failed to
     * send all migration data. Returns NULL for ddomain if
     * the dest was unable to complete migration.
     */
    VIR_DEBUG("Finish3 %p ret=%d", dconn, ret);
    VIR_FREE(cookiein);
    VIR_STEAL_PTR(cookiein, cookieout);
    cookieinlen = cookieoutlen;
    cookieoutlen = 0;

    if (useParams) {
        if (virTypedParamsGetString(params, nparams,
                                    VIR_MIGRATE_PARAM_DEST_NAME, NULL) <= 0 &&
            virTypedParamsReplaceString(&params, &nparams,
                                        VIR_MIGRATE_PARAM_DEST_NAME,
                                        vm->def->name) < 0) {
            ddomain = NULL;
        } else {
            qemuDomainObjEnterRemote(vm);
            ddomain = dconn->driver->domainMigrateFinish3Params
                (dconn, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, destflags, cancelled);
            if (qemuDomainObjExitRemote(vm, !offline) < 0)
                goto cleanup;
        }
    } else {
        dname = dname ? dname : vm->def->name;
        qemuDomainObjEnterRemote(vm);
        ddomain = dconn->driver->domainMigrateFinish3
            (dconn, dname, cookiein, cookieinlen, &cookieout, &cookieoutlen,
             dconnuri, uri, destflags, cancelled);
        if (qemuDomainObjExitRemote(vm, !offline) < 0)
            goto cleanup;
    }

    if (cancelled) {
        if (ddomain) {
            VIR_ERROR(_("finish step ignored that migration was cancelled"));
        } else {
            /* If Finish reported a useful error, use it instead of the
             * original "migration unexpectedly failed" error.
             *
             * This is ugly but we can't do better with the APIs we have. We
             * only replace the error if Finish was called with cancelled == 1
             * and reported a real error (old libvirt would report an error
             * from RPC instead of MIGRATE_FINISH_OK), which only happens when
             * the domain died on destination. To further reduce a possibility
             * of false positives we also check that Perform returned
             * VIR_ERR_OPERATION_FAILED.
             */
            if (orig_err &&
                orig_err->domain == VIR_FROM_QEMU &&
                orig_err->code == VIR_ERR_OPERATION_FAILED) {
                virErrorPtr err = virGetLastError();
                if (err &&
                    err->domain == VIR_FROM_QEMU &&
                    err->code != VIR_ERR_MIGRATE_FINISH_OK) {
                    virFreeError(orig_err);
                    orig_err = NULL;
                }
            }
        }
    }

    /* If ddomain is NULL, then we were unable to start
     * the guest on the target, and must restart on the
     * source. There is a small chance that the ddomain
     * is NULL due to an RPC failure, in which case
     * ddomain could in fact be running on the dest.
     * The lock manager plugins should take care of
     * safety in this scenario.
     */
    cancelled = ddomain == NULL;

    /* If finish3 set an error, and we don't have an earlier
     * one we need to preserve it in case confirm3 overwrites
     */
    if (!orig_err)
        orig_err = virSaveLastError();

    /*
     * If cancelled, then src VM will be restarted, else
     * it will be killed
     */
    VIR_DEBUG("Confirm3 %p cancelled=%d vm=%p", sconn, cancelled, vm);
    VIR_FREE(cookiein);
    VIR_STEAL_PTR(cookiein, cookieout);
    cookieinlen = cookieoutlen;
    cookieoutlen = 0;
    ret = qemuMigrationSrcConfirmPhase(driver, vm,
                                       cookiein, cookieinlen,
                                       flags, cancelled);
    /* If Confirm3 returns -1, there's nothing more we can
     * do, but fortunately worst case is that there is a
     * domain left in 'paused' state on source.
     */
    if (ret < 0)
        VIR_WARN("Guest %s probably left in 'paused' state on source",
                 vm->def->name);

 cleanup:
    if (ddomain) {
        virObjectUnref(ddomain);
        ret = 0;
    } else {
        ret = -1;
    }

    virObjectUnref(st);

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    VIR_FREE(uri_out);
    VIR_FREE(cookiein);
    VIR_FREE(cookieout);
    virTypedParamsFree(params, nparams);
    return ret;
}


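/* Close callback registered on the connection to the destination daemon;
 * wakes up any thread waiting on the domain object condition so it can
 * notice that the peer connection went away. */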
static void
qemuMigrationSrcConnectionClosed(virConnectPtr conn,
                                 int reason,
                                 void *opaque)
{
    virDomainObjPtr vm = opaque;

    VIR_DEBUG("conn=%p, reason=%d, vm=%s", conn, reason, vm->def->name);
    virDomainObjBroadcast(vm);
}


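/* Credential types advertised when opening the connection to the destination
 * daemon for peer-to-peer migration; no interactive callback is provided. */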
static int virConnectCredType[] = {
    VIR_CRED_AUTHNAME,
    VIR_CRED_PASSPHRASE,
};


static virConnectAuth virConnectAuthConfig = {
    .credtype = virConnectCredType,
    .ncredtype = ARRAY_CARDINALITY(virConnectCredType),
};


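/* Peer-to-peer migration driver: validate flag combinations, open and
 * keep-alive a connection to the destination libvirtd, register a close
 * callback on it, and probe which protocol features (P2P, V3, typed
 * parameters, offline) the destination supports. */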
4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395
static int
qemuMigrationSrcPerformPeer2Peer(virQEMUDriverPtr driver,
                                 virConnectPtr sconn,
                                 virDomainObjPtr vm,
                                 const char *xmlin,
                                 const char *persist_xml,
                                 const char *dconnuri,
                                 const char *uri,
                                 const char *graphicsuri,
                                 const char *listenAddress,
                                 size_t nmigrate_disks,
                                 const char **migrate_disks,
                                 int nbdPort,
4396
                                 qemuMigrationParamsPtr migParams,
4397 4398 4399 4400
                                 unsigned long flags,
                                 const char *dname,
                                 unsigned long resource,
                                 bool *v3proto)
4401 4402 4403 4404
{
    int ret = -1;
    virConnectPtr dconn = NULL;
    bool p2p;
4405
    virErrorPtr orig_err = NULL;
4406
    bool offline = !!(flags & VIR_MIGRATE_OFFLINE);
4407
    bool dstOffline = false;
4408
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
4409
    bool useParams;
4410

4411 4412
    VIR_DEBUG("driver=%p, sconn=%p, vm=%p, xmlin=%s, dconnuri=%s, uri=%s, "
              "graphicsuri=%s, listenAddress=%s, nmigrate_disks=%zu, "
4413
              "migrate_disks=%p, nbdPort=%d, flags=0x%lx, dname=%s, "
              "resource=%lu",
              driver, sconn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
              NULLSTR(uri), NULLSTR(graphicsuri), NULLSTR(listenAddress),
              nmigrate_disks, migrate_disks, nbdPort, flags, NULLSTR(dname),
              resource);

    if (flags & VIR_MIGRATE_TUNNELLED && uri) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("migration URI is not supported by tunnelled "
                         "migration"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_TUNNELLED && listenAddress) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("listen address is not supported by tunnelled "
                         "migration"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_TUNNELLED && nbdPort) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("disk port address is not supported by tunnelled "
                         "migration"));
        goto cleanup;
    }

    /* the order of operations is important here; we make sure the
     * destination side is completely setup before we touch the source
     */

    qemuDomainObjEnterRemote(vm);
    dconn = virConnectOpenAuth(dconnuri, &virConnectAuthConfig, 0);
    if (qemuDomainObjExitRemote(vm, !offline) < 0)
        goto cleanup;

    if (dconn == NULL) {
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("Failed to connect to remote libvirt URI %s: %s"),
                       dconnuri, virGetLastErrorMessage());
        virObjectUnref(cfg);
        return -1;
    }

    if (virConnectSetKeepAlive(dconn, cfg->keepAliveInterval,
                               cfg->keepAliveCount) < 0)
        goto cleanup;

    if (virConnectRegisterCloseCallback(dconn, qemuMigrationSrcConnectionClosed,
                                        vm, NULL) < 0) {
        goto cleanup;
    }

    qemuDomainObjEnterRemote(vm);
    p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                   VIR_DRV_FEATURE_MIGRATION_P2P);
    /* v3proto reflects whether the caller used Perform3, but with
     * p2p migrate, regardless of whether Perform2 or Perform3
     * were used, we decide protocol based on what target supports
     */
    *v3proto = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                        VIR_DRV_FEATURE_MIGRATION_V3);
    useParams = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                         VIR_DRV_FEATURE_MIGRATION_PARAMS);
    if (offline)
        dstOffline = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                              VIR_DRV_FEATURE_MIGRATION_OFFLINE);
    if (qemuDomainObjExitRemote(vm, !offline) < 0)
        goto cleanup;

    if (!p2p) {
        virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                       _("Destination libvirt does not support peer-to-peer migration protocol"));
        goto cleanup;
    }

    /* Only xmlin, dname, uri, and bandwidth parameters can be used with
     * old-style APIs. */
    if (!useParams && (graphicsuri || listenAddress || nmigrate_disks)) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("Migration APIs with extensible parameters are not "
                         "supported but extended parameters were passed"));
        goto cleanup;
    }

    if (offline && !dstOffline) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("offline migration is not supported by "
                         "the destination host"));
        goto cleanup;
    }

    /* Change protection is only required on the source side (us), and
     * only for v3 migration when begin and perform are separate jobs.
     * But peer-2-peer is already a single job, and we still want to
     * talk to older destinations that would reject the flag.
     * Therefore it is safe to clear the bit here.  */
    flags &= ~VIR_MIGRATE_CHANGE_PROTECTION;

    if (*v3proto) {
        ret = qemuMigrationSrcPerformPeer2Peer3(driver, sconn, dconn, dconnuri, vm, xmlin,
                                                persist_xml, dname, uri, graphicsuri,
                                                listenAddress, nmigrate_disks, migrate_disks,
                                                nbdPort, migParams, resource,
                                                useParams, flags);
    } else {
        ret = qemuMigrationSrcPerformPeer2Peer2(driver, sconn, dconn, vm,
                                                dconnuri, flags, dname, resource,
                                                migParams);
    }

 cleanup:
    orig_err = virSaveLastError();
    qemuDomainObjEnterRemote(vm);
    virConnectUnregisterCloseCallback(dconn, qemuMigrationSrcConnectionClosed);
    virObjectUnref(dconn);
    ignore_value(qemuDomainObjExitRemote(vm, false));
    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    virObjectUnref(cfg);
    return ret;
}


/*
 * This implements perform part of the migration protocol when migration job
 * does not need to be active across several APIs, i.e., peer2peer migration or
 * perform phase of v2 non-peer2peer migration.
 */
static int
qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
                           virConnectPtr conn,
                           virDomainObjPtr vm,
                           const char *xmlin,
                           const char *persist_xml,
                           const char *dconnuri,
                           const char *uri,
                           const char *graphicsuri,
                           const char *listenAddress,
                           size_t nmigrate_disks,
                           const char **migrate_disks,
                           int nbdPort,
                           qemuMigrationParamsPtr migParams,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           unsigned long flags,
                           const char *dname,
                           unsigned long resource,
                           bool v3proto)
{
    virObjectEventPtr event = NULL;
    int ret = -1;
    virErrorPtr orig_err = NULL;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                              flags) < 0)
        goto cleanup;

    if (!(flags & VIR_MIGRATE_OFFLINE) && virDomainObjCheckActive(vm) < 0)
        goto endjob;

    if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
        goto endjob;

    if (!(flags & (VIR_MIGRATE_UNSAFE | VIR_MIGRATE_OFFLINE)) &&
        !qemuMigrationSrcIsSafe(vm->def, nmigrate_disks, migrate_disks, flags))
        goto endjob;

    qemuMigrationSrcStoreDomainState(vm);

    if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
        ret = qemuMigrationSrcPerformPeer2Peer(driver, conn, vm, xmlin, persist_xml,
                                               dconnuri, uri, graphicsuri, listenAddress,
                                               nmigrate_disks, migrate_disks, nbdPort,
                                               migParams, flags, dname, resource,
                                               &v3proto);
    } else {
        qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
        ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen,
                                            cookieout, cookieoutlen,
                                            flags, resource, NULL, NULL, 0, NULL,
                                            migParams);
    }
    if (ret < 0)
        goto endjob;

    /*
     * In v3 protocol, the source VM is not killed off until the
     * confirm step.
     */
    if (!v3proto) {
        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
                        QEMU_ASYNC_JOB_MIGRATION_OUT,
                        VIR_QEMU_PROCESS_STOP_MIGRATED);
        virDomainAuditStop(vm, "migrated");
        event = virDomainEventLifecycleNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
    }

 endjob:
    if (ret < 0)
        orig_err = virSaveLastError();

    /* v2 proto has no confirm phase so we need to reset migration parameters
     * here
     */
    if (!v3proto && ret < 0)
        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 priv->job.migParams, priv->job.apiFlags);

    qemuMigrationSrcRestoreDomainState(driver, vm);

    qemuMigrationJobFinish(driver, vm);
    if (!virDomainObjIsActive(vm) && ret == 0) {
        if (flags & VIR_MIGRATE_UNDEFINE_SOURCE) {
            virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
            vm->persistent = 0;
        }
        qemuDomainRemoveInactiveJob(driver, vm);
    }

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

 cleanup:
    virObjectEventStateQueue(driver->domainEventState, event);
    virObjectUnref(cfg);
    return ret;
}

/*
 * This implements perform phase of v3 migration protocol.
 */
static int
qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
                             virConnectPtr conn,
                             virDomainObjPtr vm,
                             const char *persist_xml,
                             const char *uri,
                             const char *graphicsuri,
                             size_t nmigrate_disks,
                             const char **migrate_disks,
                             qemuMigrationParamsPtr migParams,
                             const char *cookiein,
                             int cookieinlen,
                             char **cookieout,
                             int *cookieoutlen,
                             unsigned long flags,
                             unsigned long resource)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;

    /* If we didn't start the job in the begin phase, start it now. */
    if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                  flags) < 0)
            goto cleanup;
    } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
        goto cleanup;
    }

    qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
    virCloseCallbacksUnset(driver->closeCallbacks, vm,
                           qemuMigrationSrcCleanup);

    ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen,
                                        cookieout, cookieoutlen,
                                        flags, resource, NULL, graphicsuri,
                                        nmigrate_disks, migrate_disks, migParams);

    if (ret < 0) {
        qemuMigrationSrcRestoreDomainState(driver, vm);
        goto endjob;
    }

    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE);

    if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
                             qemuMigrationSrcCleanup) < 0)
        goto endjob;

 endjob:
    if (ret < 0) {
        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 priv->job.migParams, priv->job.apiFlags);
        qemuMigrationJobFinish(driver, vm);
    } else {
        qemuMigrationJobContinue(vm);
    }

    if (!virDomainObjIsActive(vm))
        qemuDomainRemoveInactiveJob(driver, vm);

 cleanup:
    return ret;
}

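/*
 * Entry point for the perform step on the source host; dispatches to the
 * peer-to-peer, v2, or v3 implementations above.
 */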
int
qemuMigrationSrcPerform(virQEMUDriverPtr driver,
                        virConnectPtr conn,
                        virDomainObjPtr vm,
                        const char *xmlin,
                        const char *persist_xml,
                        const char *dconnuri,
                        const char *uri,
                        const char *graphicsuri,
                        const char *listenAddress,
                        size_t nmigrate_disks,
                        const char **migrate_disks,
                        int nbdPort,
                        qemuMigrationParamsPtr migParams,
                        const char *cookiein,
                        int cookieinlen,
                        char **cookieout,
                        int *cookieoutlen,
                        unsigned long flags,
                        const char *dname,
                        unsigned long resource,
                        bool v3proto)
{
    VIR_DEBUG("driver=%p, conn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
              "uri=%s, graphicsuri=%s, listenAddress=%s, "
              "nmigrate_disks=%zu, migrate_disks=%p, nbdPort=%d, "
              "cookiein=%s, cookieinlen=%d, cookieout=%p, cookieoutlen=%p, "
              "flags=0x%lx, dname=%s, resource=%lu, v3proto=%d",
              driver, conn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
              NULLSTR(uri), NULLSTR(graphicsuri), NULLSTR(listenAddress),
              nmigrate_disks, migrate_disks, nbdPort,
              NULLSTR(cookiein), cookieinlen, cookieout, cookieoutlen,
              flags, NULLSTR(dname), resource, v3proto);

    if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
        if (cookieinlen) {
            virReportError(VIR_ERR_OPERATION_INVALID,
                           "%s", _("received unexpected cookie with P2P migration"));
            return -1;
        }

        return qemuMigrationSrcPerformJob(driver, conn, vm, xmlin, persist_xml, dconnuri, uri,
                                          graphicsuri, listenAddress,
                                          nmigrate_disks, migrate_disks, nbdPort,
                                          migParams,
                                          cookiein, cookieinlen,
                                          cookieout, cookieoutlen,
                                          flags, dname, resource, v3proto);
    } else {
        if (dconnuri) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("Unexpected dconnuri parameter with non-peer2peer migration"));
            return -1;
        }

        if (v3proto) {
            return qemuMigrationSrcPerformPhase(driver, conn, vm, persist_xml, uri,
                                                graphicsuri,
                                                nmigrate_disks, migrate_disks,
                                                migParams,
                                                cookiein, cookieinlen,
                                                cookieout, cookieoutlen,
                                                flags, resource);
        } else {
            return qemuMigrationSrcPerformJob(driver, conn, vm, xmlin, persist_xml, NULL,
                                              uri, graphicsuri, listenAddress,
                                              nmigrate_disks, migrate_disks, nbdPort,
                                              migParams,
                                              cookiein, cookieinlen,
                                              cookieout, cookieoutlen, flags,
                                              dname, resource, v3proto);
        }
    }
}

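/*
 * Associate port profiles for all direct (macvtap) interfaces of the
 * incoming domain; on failure, disassociate the interfaces that were
 * already processed.
 */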
static int
qemuMigrationDstVPAssociatePortProfiles(virDomainDefPtr def)
{
    size_t i;
    int last_good_net = -1;
    virDomainNetDefPtr net;

    for (i = 0; i < def->nnets; i++) {
        net = def->nets[i];
        if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
            if (virNetDevVPortProfileAssociate(net->ifname,
                                               virDomainNetGetActualVirtPortProfile(net),
                                               &net->mac,
                                               virDomainNetGetActualDirectDev(net),
                                               -1,
                                               def->uuid,
                                               VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH,
                                               false) < 0) {
                virReportError(VIR_ERR_OPERATION_FAILED,
                               _("Port profile Associate failed for %s"),
                               net->ifname);
                goto err_exit;
            }
            last_good_net = i;
            VIR_DEBUG("Port profile Associate succeeded for %s", net->ifname);

            if (virNetDevMacVLanVPortProfileRegisterCallback(net->ifname, &net->mac,
                                                             virDomainNetGetActualDirectDev(net), def->uuid,
                                                             virDomainNetGetActualVirtPortProfile(net),
                                                             VIR_NETDEV_VPORT_PROFILE_OP_CREATE))
                goto err_exit;
        }
    }

    return 0;

 err_exit:
    for (i = 0; last_good_net != -1 && i <= last_good_net; i++) {
        net = def->nets[i];
        if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
            ignore_value(virNetDevVPortProfileDisassociate(net->ifname,
                                                           virDomainNetGetActualVirtPortProfile(net),
                                                           &net->mac,
                                                           virDomainNetGetActualDirectDev(net),
                                                           -1,
                                                           VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH));
        }
    }
    return -1;
}


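/*
 * Make the incoming domain persistent on the destination host using the
 * persistent definition carried in the migration cookie; the previous
 * persistent state is restored if saving the configuration fails (unless
 * @ignoreSaveError is set).
 */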
static int
qemuMigrationDstPersist(virQEMUDriverPtr driver,
                        virDomainObjPtr vm,
                        qemuMigrationCookiePtr mig,
                        bool ignoreSaveError)
{
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    virCapsPtr caps = NULL;
    virDomainDefPtr vmdef;
    virDomainDefPtr oldDef = NULL;
    unsigned int oldPersist = vm->persistent;
    virObjectEventPtr event;
    int ret = -1;

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    vm->persistent = 1;
    oldDef = vm->newDef;
    vm->newDef = qemuMigrationCookieGetPersistent(mig);

    if (!(vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm)))
        goto error;

    if (virDomainSaveConfig(cfg->configDir, driver->caps, vmdef) < 0 &&
        !ignoreSaveError)
        goto error;

    event = virDomainEventLifecycleNewFromObj(vm,
                                              VIR_DOMAIN_EVENT_DEFINED,
                                              oldPersist ?
                                              VIR_DOMAIN_EVENT_DEFINED_UPDATED :
                                              VIR_DOMAIN_EVENT_DEFINED_ADDED);
    virObjectEventStateQueue(driver->domainEventState, event);

    ret = 0;

 cleanup:
    virDomainDefFree(oldDef);
    virObjectUnref(caps);
    virObjectUnref(cfg);
    return ret;

 error:
    virDomainDefFree(vm->newDef);
    vm->persistent = oldPersist;
    vm->newDef = oldDef;
    oldDef = NULL;
    goto cleanup;
}


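/*
 * Finish phase on the destination host: consume the migration cookie,
 * optionally make the domain persistent, resume vCPUs unless
 * VIR_MIGRATE_PAUSED was requested, and return the new domain object.
 * If the migration failed, the incoming domain is torn down unless it was
 * already running in post-copy mode.
 */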
virDomainPtr
qemuMigrationDstFinish(virQEMUDriverPtr driver,
                       virConnectPtr dconn,
                       virDomainObjPtr vm,
                       const char *cookiein,
                       int cookieinlen,
                       char **cookieout,
                       int *cookieoutlen,
                       unsigned long flags,
                       int retcode,
                       bool v3proto)
{
    virDomainPtr dom = NULL;
    qemuMigrationCookiePtr mig = NULL;
    virErrorPtr orig_err = NULL;
    int cookie_flags = 0;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    unsigned short port;
    unsigned long long timeReceived = 0;
    virObjectEventPtr event;
    qemuDomainJobInfoPtr jobInfo = NULL;
    bool inPostCopy = false;
    bool doKill = true;

    VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, retcode=%d",
              driver, dconn, vm, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, retcode);

    port = priv->migrationPort;
    priv->migrationPort = 0;

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) {
        qemuMigrationDstErrorReport(driver, vm->def->name);
        goto cleanup;
    }

    ignore_value(virTimeMillisNow(&timeReceived));

    qemuMigrationJobStartPhase(driver, vm,
                               v3proto ? QEMU_MIGRATION_PHASE_FINISH3
                                       : QEMU_MIGRATION_PHASE_FINISH2);

    qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup);
    VIR_FREE(priv->job.completed);

    cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK |
                   QEMU_MIGRATION_COOKIE_STATS |
                   QEMU_MIGRATION_COOKIE_NBD;
    /* Some older versions of libvirt always send persistent XML in the cookie
     * even though VIR_MIGRATE_PERSIST_DEST was not used. */
    cookie_flags |= QEMU_MIGRATION_COOKIE_PERSISTENT;

    if (!(mig = qemuMigrationEatCookie(driver, vm->def, priv->origname, priv,
                                       cookiein, cookieinlen, cookie_flags)))
        goto endjob;

    if (flags & VIR_MIGRATE_OFFLINE) {
        if (retcode == 0 &&
            qemuMigrationDstPersist(driver, vm, mig, false) == 0)
            dom = virGetDomain(dconn, vm->def->name, vm->def->uuid, -1);
        goto endjob;
    }

    if (retcode != 0) {
        /* Check for a possible error on the monitor in case Finish was called
         * earlier than monitor EOF handler got a chance to process the error
         */
        qemuDomainCheckMonitor(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
        goto endjob;
    }

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        qemuMigrationDstErrorReport(driver, vm->def->name);
        goto endjob;
    }

    if (qemuMigrationDstVPAssociatePortProfiles(vm->def) < 0)
        goto endjob;

    if (mig->network && qemuMigrationDstOPDRelocate(driver, vm, mig) < 0)
        VIR_WARN("unable to provide network data for relocation");

    if (qemuMigrationDstStopNBDServer(driver, vm, mig) < 0)
        goto endjob;

    if (qemuRefreshVirtioChannelState(driver, vm,
                                      QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        goto endjob;

    if (qemuConnectAgent(driver, vm) < 0)
        goto endjob;

    if (flags & VIR_MIGRATE_PERSIST_DEST) {
        if (qemuMigrationDstPersist(driver, vm, mig, !v3proto) < 0) {
            /* Hmpf.  Migration was successful, but making it persistent
             * was not.  If we report successful, then when this domain
             * shuts down, management tools are in for a surprise.  On the
             * other hand, if we report failure, then the management tools
             * might try to restart the domain on the source side, even
             * though the domain is actually running on the destination.
             * Pretend success and hope that this is a rare situation and
             * management tools are smart.
             *
             * However, in v3 protocol, the source VM is still available
             * to restart during confirm() step, so we kill it off now.
             */
            if (v3proto)
                goto endjob;
        }
    }

    /* We need to wait for QEMU to process all data sent by the source
     * before starting guest CPUs.
     */
    if (qemuMigrationDstWaitForCompletion(driver, vm,
                                          QEMU_ASYNC_JOB_MIGRATION_IN,
                                          !!(flags & VIR_MIGRATE_POSTCOPY)) < 0) {
        /* There's not much we can do for v2 protocol since the
         * original domain on the source host is already gone.
         */
        if (v3proto)
            goto endjob;
    }

    /* Now that the state data was transferred we can refresh the actual state
     * of the devices */
    if (qemuProcessRefreshState(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
        /* Similarly to the case above v2 protocol will not be able to recover
         * from this. Let's ignore this and perhaps stuff will not break. */
        if (v3proto)
            goto endjob;
    }

    if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY)
        inPostCopy = true;

    if (!(flags & VIR_MIGRATE_PAUSED)) {
        /* run 'cont' on the destination, which allows migration on qemu
         * >= 0.10.6 to work properly.  This isn't strictly necessary on
         * older qemu's, but it also doesn't hurt anything there
         */
        if (qemuProcessStartCPUs(driver, vm,
                                 inPostCopy ? VIR_DOMAIN_RUNNING_POSTCOPY
                                            : VIR_DOMAIN_RUNNING_MIGRATED,
                                 QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
            if (virGetLastErrorCode() == VIR_ERR_OK)
                virReportError(VIR_ERR_INTERNAL_ERROR,
                               "%s", _("resume operation failed"));
            /* Need to save the current error, in case shutting
             * down the process overwrites it
             */
            orig_err = virSaveLastError();

            /*
             * In v3 protocol, the source VM is still available to
             * restart during confirm() step, so we kill it off
             * now.
             * In v2 protocol, the source is dead, so we leave
             * target in paused state, in case admin can fix
             * things up.
             */
            if (v3proto)
                goto endjob;
        }

        if (inPostCopy)
            doKill = false;
    }

    if (mig->jobInfo) {
        jobInfo = mig->jobInfo;
        mig->jobInfo = NULL;

        if (jobInfo->sent && timeReceived) {
            jobInfo->timeDelta = timeReceived - jobInfo->sent;
            jobInfo->received = timeReceived;
            jobInfo->timeDeltaSet = true;
        }
        qemuDomainJobInfoUpdateTime(jobInfo);
        qemuDomainJobInfoUpdateDowntime(jobInfo);
    }

    if (inPostCopy) {
        if (qemuMigrationDstWaitForCompletion(driver, vm,
                                              QEMU_ASYNC_JOB_MIGRATION_IN,
                                              false) < 0) {
            goto endjob;
        }
        if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
            virDomainObjSetState(vm,
                                 VIR_DOMAIN_RUNNING,
                                 VIR_DOMAIN_RUNNING_MIGRATED);
        }
    }

    dom = virGetDomain(dconn, vm->def->name, vm->def->uuid, vm->def->id);

    if (inPostCopy) {
        /* The only RESUME event during post-copy migration is triggered by
         * QEMU when the running domain moves from the source to the
         * destination host, but then the migration keeps running until all
         * modified memory is transferred from the source host. This will
         * result in VIR_DOMAIN_EVENT_RESUMED with RESUMED_POSTCOPY detail.
         * However, our API documentation says we need to fire another RESUMED
         * event at the very end of migration with RESUMED_MIGRATED detail.
         */
        event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_RESUMED,
                                                  VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
        virObjectEventStateQueue(driver->domainEventState, event);
    }

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER);
        event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_SUSPENDED,
                                                  VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
        virObjectEventStateQueue(driver->domainEventState, event);
    }

    if (virDomainObjIsActive(vm) &&
        virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
        VIR_WARN("Failed to save status on vm %s", vm->def->name);

    /* Guest is successfully running, so cancel previous auto destroy */
    qemuProcessAutoDestroyRemove(driver, vm);

 endjob:
    if (!dom &&
        !(flags & VIR_MIGRATE_OFFLINE) &&
        virDomainObjIsActive(vm)) {
        if (doKill) {
            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                            QEMU_ASYNC_JOB_MIGRATION_IN,
                            VIR_QEMU_PROCESS_STOP_MIGRATED);
            virDomainAuditStop(vm, "failed");
            event = virDomainEventLifecycleNewFromObj(vm,
                                VIR_DOMAIN_EVENT_STOPPED,
                                VIR_DOMAIN_EVENT_STOPPED_FAILED);
            virObjectEventStateQueue(driver->domainEventState, event);
        } else {
            qemuMigrationAnyPostcopyFailed(driver, vm);
        }
    }

    if (dom) {
        if (jobInfo) {
            VIR_STEAL_PTR(priv->job.completed, jobInfo);
            priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
            priv->job.completed->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
        }

        if (qemuMigrationBakeCookie(mig, driver, vm,
                                    QEMU_MIGRATION_DESTINATION,
                                    cookieout, cookieoutlen,
                                    QEMU_MIGRATION_COOKIE_STATS) < 0)
            VIR_WARN("Unable to encode migration cookie");

        /* Remove completed stats for post-copy, everything but timing fields
         * is obsolete anyway.
         */
        if (inPostCopy)
            VIR_FREE(priv->job.completed);
    }

    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                             priv->job.migParams, priv->job.apiFlags);

    qemuMigrationJobFinish(driver, vm);
    if (!virDomainObjIsActive(vm))
        qemuDomainRemoveInactiveJob(driver, vm);

 cleanup:
    VIR_FREE(jobInfo);
    virPortAllocatorRelease(port);
    if (priv->mon)
        qemuMonitorSetDomainLog(priv->mon, NULL, NULL, NULL);
    VIR_FREE(priv->origname);
    virDomainObjEndAPI(&vm);
    qemuMigrationCookieFree(mig);
    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    virObjectUnref(cfg);

    /* Set a special error if Finish is expected to return NULL as a result of
     * successful call with retcode != 0
     */
    if (retcode != 0 && !dom && virGetLastErrorCode() == VIR_ERR_OK)
        virReportError(VIR_ERR_MIGRATE_FINISH_OK, NULL);
    return dom;
}


/* Helper function called while vm is active.  */
int
qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
                       int fd,
                       const char *compressor,
                       qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int rc;
    int ret = -1;
    virCommandPtr cmd = NULL;
    int pipeFD[2] = { -1, -1 };
    unsigned long saveMigBandwidth = priv->migMaxBandwidth;
    char *errbuf = NULL;
    virErrorPtr orig_err = NULL;

    /* Increase migration bandwidth to unlimited since target is a file.
     * Failure to change migration speed is not fatal. */
    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
        qemuMonitorSetMigrationSpeed(priv->mon,
                                     QEMU_DOMAIN_MIG_BANDWIDTH_MAX);
        priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
        if (qemuDomainObjExitMonitor(driver, vm) < 0)
            return -1;
    }

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        /* nothing to tear down */
        return -1;
    }

    if (compressor && pipe(pipeFD) < 0) {
        virReportSystemError(errno, "%s",
                             _("Failed to create pipe for migration"));
        return -1;
    }

    /* All right! We can use fd migration, which means that qemu
     * doesn't have to open() the file, so while we still have to
     * grant SELinux access, we can do it on fd and avoid cleanup
     * later, as well as skip futzing with cgroup.  */
    if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def,
                                    compressor ? pipeFD[1] : fd) < 0)
        goto cleanup;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        goto cleanup;

    if (!compressor) {
        rc = qemuMonitorMigrateToFd(priv->mon,
                                    QEMU_MONITOR_MIGRATE_BACKGROUND,
                                    fd);
    } else {
        const char *prog = compressor;
        const char *args[] = {
            prog,
            "-c",
            NULL
        };

        cmd = virCommandNewArgs(args);
        virCommandSetInputFD(cmd, pipeFD[0]);
        virCommandSetOutputFD(cmd, &fd);
        virCommandSetErrorBuffer(cmd, &errbuf);
        virCommandDoAsyncIO(cmd);
        if (virSetCloseExec(pipeFD[1]) < 0) {
            virReportSystemError(errno, "%s",
                                 _("Unable to set cloexec flag"));
            ignore_value(qemuDomainObjExitMonitor(driver, vm));
            goto cleanup;
5271
        }
5272 5273 5274 5275 5276 5277 5278 5279 5280 5281
        if (virCommandRunAsync(cmd, NULL) < 0) {
            ignore_value(qemuDomainObjExitMonitor(driver, vm));
            goto cleanup;
        }
        rc = qemuMonitorMigrateToFd(priv->mon,
                                    QEMU_MONITOR_MIGRATE_BACKGROUND,
                                    pipeFD[1]);
        if (VIR_CLOSE(pipeFD[0]) < 0 ||
            VIR_CLOSE(pipeFD[1]) < 0)
            VIR_WARN("failed to close intermediate pipe");
5282
    }
5283
    if (qemuDomainObjExitMonitor(driver, vm) < 0)
5284
        goto cleanup;
5285 5286 5287
    if (rc < 0)
        goto cleanup;

5288
    rc = qemuMigrationSrcWaitForCompletion(driver, vm, asyncJob, NULL, 0);
5289

5290 5291 5292 5293
    if (rc < 0) {
        if (rc == -2) {
            orig_err = virSaveLastError();
            virCommandAbort(cmd);
5294 5295
            if (virDomainObjIsActive(vm) &&
                qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
5296
                qemuMonitorMigrateCancel(priv->mon);
5297
                ignore_value(qemuDomainObjExitMonitor(driver, vm));
5298 5299
            }
        }
5300
        goto cleanup;
5301
    }
5302

5303 5304 5305
    if (cmd && virCommandWait(cmd, NULL) < 0)
        goto cleanup;

    qemuDomainEventEmitJobCompleted(driver, vm);
    ret = 0;

 cleanup:
    if (ret < 0 && !orig_err)
        orig_err = virSaveLastError();

    /* Restore max migration bandwidth */
    if (virDomainObjIsActive(vm) &&
        qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
        qemuMonitorSetMigrationSpeed(priv->mon, saveMigBandwidth);
        priv->migMaxBandwidth = saveMigBandwidth;
        ignore_value(qemuDomainObjExitMonitor(driver, vm));
    }

    VIR_FORCE_CLOSE(pipeFD[0]);
    VIR_FORCE_CLOSE(pipeFD[1]);
    if (cmd) {
        VIR_DEBUG("Compression binary stderr: %s", NULLSTR(errbuf));
        VIR_FREE(errbuf);
        virCommandFree(cmd);
    }

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

    return ret;
}


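/*
 * Cancel an unfinished outgoing migration: tell QEMU to abort the RAM
 * migration and cancel any NBD storage copy jobs that are still running.
 */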
int
qemuMigrationSrcCancel(virQEMUDriverPtr driver,
                       virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    bool storage = false;
    size_t i;
    int ret = -1;

    VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
              vm->def->name);

    qemuDomainObjEnterMonitor(driver, vm);
    ignore_value(qemuMonitorMigrateCancel(priv->mon));
    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        goto cleanup;

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        qemuBlockJobDataPtr job;

        if (!(job = qemuBlockJobDiskGetJob(disk)) ||
            !qemuBlockJobIsRunning(job))
            diskPriv->migrating = false;

        if (diskPriv->migrating) {
            qemuBlockJobSyncBegin(job);
            storage = true;
        }

        virObjectUnref(job);
    }

    if (storage &&
        qemuMigrationSrcNBDCopyCancel(driver, vm, false,
                                      QEMU_ASYNC_JOB_NONE, NULL) < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    return ret;
}


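/*
 * Begin a migration async job on @vm and set the mask of regular jobs
 * that are allowed to run concurrently with it.
 */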
static int
qemuMigrationJobStart(virQEMUDriverPtr driver,
                      virDomainObjPtr vm,
                      qemuDomainAsyncJob job,
                      unsigned long apiFlags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virDomainJobOperation op;
    unsigned long long mask;

    if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
        op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN;
        mask = QEMU_JOB_NONE;
    } else {
        op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT;
        mask = QEMU_JOB_DEFAULT_MASK |
               JOB_MASK(QEMU_JOB_SUSPEND) |
               JOB_MASK(QEMU_JOB_MIGRATION_OP);
    }

    if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0)
        return -1;

    priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;

    qemuDomainObjSetAsyncJobMask(vm, mask);
    return 0;
}

static void
qemuMigrationJobSetPhase(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         qemuMigrationJobPhase phase)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (phase < priv->job.phase) {
        VIR_ERROR(_("migration protocol going backwards %s => %s"),
                  qemuMigrationJobPhaseTypeToString(priv->job.phase),
                  qemuMigrationJobPhaseTypeToString(phase));
        return;
    }

    qemuDomainObjSetJobPhase(driver, vm, phase);
}

static void
qemuMigrationJobStartPhase(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           qemuMigrationJobPhase phase)
{
    qemuMigrationJobSetPhase(driver, vm, phase);
}

static void
qemuMigrationJobContinue(virDomainObjPtr vm)
{
    qemuDomainObjReleaseAsyncJob(vm);
}

static bool
qemuMigrationJobIsActive(virDomainObjPtr vm,
                         qemuDomainAsyncJob job)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (priv->job.asyncJob != job) {
        const char *msg;

        if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
            msg = _("domain '%s' is not processing incoming migration");
        else
            msg = _("domain '%s' is not being migrated");

        virReportError(VIR_ERR_OPERATION_INVALID, msg, vm->def->name);
        return false;
    }
    return true;
}

static void
qemuMigrationJobFinish(virQEMUDriverPtr driver, virDomainObjPtr vm)
{
    qemuDomainObjEndAsyncJob(driver, vm);
}


static void
qemuMigrationDstErrorFree(void *data,
                       const void *name ATTRIBUTE_UNUSED)
{
    virErrorPtr err = data;
    virFreeError(err);
}

int
qemuMigrationDstErrorInit(virQEMUDriverPtr driver)
{
    driver->migrationErrors = virHashAtomicNew(64, qemuMigrationDstErrorFree);
    if (driver->migrationErrors)
        return 0;
    else
        return -1;
}

/**
 * This function consumes @err; the caller should consider the @err pointer
 * invalid after calling this function.
 */
void
qemuMigrationDstErrorSave(virQEMUDriverPtr driver,
                          const char *name,
                          virErrorPtr err)
{
    if (!err)
        return;

    VIR_DEBUG("Saving incoming migration error for domain %s: %s",
              name, err->message);
    if (virHashAtomicUpdate(driver->migrationErrors, name, err) < 0) {
        VIR_WARN("Failed to save migration error for domain '%s'", name);
        virFreeError(err);
    }
}

void
qemuMigrationDstErrorReport(virQEMUDriverPtr driver,
                            const char *name)
{
    virErrorPtr err;

    if (!(err = virHashAtomicSteal(driver->migrationErrors, name)))
        return;

    VIR_DEBUG("Restoring saved incoming migration error for domain %s: %s",
              name, err->message);
    virSetError(err);
    virFreeError(err);
}


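/*
 * Sum up the progress of the NBD disk mirror block jobs into
 * @jobInfo->mirrorStats so that reported migration statistics include the
 * storage transfer as well.
 */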
int
qemuMigrationSrcFetchMirrorStats(virQEMUDriverPtr driver,
                                 virDomainObjPtr vm,
                                 qemuDomainAsyncJob asyncJob,
                                 qemuDomainJobInfoPtr jobInfo)
{
    size_t i;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    bool nbd = false;
    virHashTablePtr blockinfo = NULL;
    qemuDomainMirrorStatsPtr stats = &jobInfo->mirrorStats;

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        if (QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating) {
            nbd = true;
            break;
        }
    }

    if (!nbd)
        return 0;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    blockinfo = qemuMonitorGetAllBlockJobInfo(priv->mon);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockinfo)
        return -1;

    memset(stats, 0, sizeof(*stats));

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        qemuMonitorBlockJobInfoPtr data;

        if (!diskPriv->migrating ||
            !(data = virHashLookup(blockinfo, disk->info.alias)))
            continue;

        stats->transferred += data->cur;
        stats->total += data->end;
    }

    virHashFree(blockinfo);
    return 0;
}