You need to sign in or sign up before continuing.
qemu_migration.c 187.9 KB
Newer Older
1 2 3
/*
 * qemu_migration.c: QEMU migration handling
 *
4
 * Copyright (C) 2006-2015 Red Hat, Inc.
5 6 7 8 9 10 11 12 13 14 15 16
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library.  If not, see
O
Osier Yang 已提交
18
 * <http://www.gnu.org/licenses/>.
19 20 21 22 23
 *
 */

#include <config.h>

J
Ján Tomko 已提交
24 25
#include <netdb.h>
#include <sys/socket.h>
26
#include <sys/time.h>
27
#include <fcntl.h>
28
#include <poll.h>
29 30

#include "qemu_migration.h"
31
#include "qemu_migration_cookie.h"
32
#include "qemu_migration_params.h"
33 34 35 36
#include "qemu_monitor.h"
#include "qemu_domain.h"
#include "qemu_process.h"
#include "qemu_capabilities.h"
37
#include "qemu_alias.h"
38
#include "qemu_cgroup.h"
39
#include "qemu_hotplug.h"
40
#include "qemu_blockjob.h"
41
#include "qemu_security.h"
42
#include "qemu_slirp.h"
43
#include "qemu_block.h"
44

45
#include "domain_audit.h"
46
#include "virlog.h"
47
#include "virerror.h"
48
#include "viralloc.h"
E
Eric Blake 已提交
49
#include "virfile.h"
50
#include "virnetdevopenvswitch.h"
51
#include "datatypes.h"
52
#include "virfdstream.h"
53
#include "viruuid.h"
54
#include "virtime.h"
55
#include "locking/domain_lock.h"
56
#include "rpc/virnetsocket.h"
57
#include "virstoragefile.h"
M
Martin Kletzander 已提交
58
#include "viruri.h"
59
#include "virhook.h"
60
#include "virstring.h"
61
#include "virtypedparam.h"
M
Michael R. Hines 已提交
62
#include "virprocess.h"
63
#include "nwfilter_conf.h"
64
#include "virdomainsnapshotobjlist.h"
65 66 67

#define VIR_FROM_THIS VIR_FROM_QEMU

68 69
VIR_LOG_INIT("qemu.qemu_migration");

70 71
VIR_ENUM_IMPL(qemuMigrationJobPhase,
              QEMU_MIGRATION_PHASE_LAST,
72 73 74 75 76 77 78 79 80 81 82 83
              "none",
              "perform2",
              "begin3",
              "perform3",
              "perform3_done",
              "confirm3_cancelled",
              "confirm3",
              "prepare",
              "finish2",
              "finish3",
);

84 85 86
static int
qemuMigrationJobStart(virQEMUDriverPtr driver,
                      virDomainObjPtr vm,
87 88
                      qemuDomainAsyncJob job,
                      unsigned long apiFlags)
89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;

static void
qemuMigrationJobSetPhase(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         qemuMigrationJobPhase phase)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);

static void
qemuMigrationJobStartPhase(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           qemuMigrationJobPhase phase)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);

static void
qemuMigrationJobContinue(virDomainObjPtr obj)
    ATTRIBUTE_NONNULL(1);

static bool
qemuMigrationJobIsActive(virDomainObjPtr vm,
                         qemuDomainAsyncJob job)
    ATTRIBUTE_NONNULL(1);

static void
qemuMigrationJobFinish(virQEMUDriverPtr driver,
                       virDomainObjPtr obj)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
116

117
static void
118
qemuMigrationSrcStoreDomainState(virDomainObjPtr vm)
119 120 121 122 123 124 125 126 127 128
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    priv->preMigrationState = virDomainObjGetState(vm, NULL);

    VIR_DEBUG("Storing pre-migration state=%d domain=%p",
              priv->preMigrationState, vm);
}

/* Returns true if the domain was resumed, false otherwise */
static bool
129
qemuMigrationSrcRestoreDomainState(virQEMUDriverPtr driver, virDomainObjPtr vm)
130 131
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
132 133
    int reason;
    virDomainState state = virDomainObjGetState(vm, &reason);
134 135
    bool ret = false;

136 137 138 139 140
    VIR_DEBUG("driver=%p, vm=%p, pre-mig-state=%s, state=%s, reason=%s",
              driver, vm,
              virDomainStateTypeToString(priv->preMigrationState),
              virDomainStateTypeToString(state),
              virDomainStateReasonToString(state, reason));
141

142 143 144 145 146
    if (state != VIR_DOMAIN_PAUSED ||
        reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED)
        goto cleanup;

    if (priv->preMigrationState == VIR_DOMAIN_RUNNING) {
147 148 149 150 151 152
        /* This is basically the only restore possibility that's safe
         * and we should attempt to do */

        VIR_DEBUG("Restoring pre-migration state due to migration error");

        /* we got here through some sort of failure; start the domain again */
153
        if (qemuProcessStartCPUs(driver, vm,
154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169
                                 VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
                                 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
            /* Hm, we already know we are in error here.  We don't want to
             * overwrite the previous error, though, so we just throw something
             * to the logs and hope for the best */
            VIR_ERROR(_("Failed to resume guest %s after failure"), vm->def->name);
            goto cleanup;
        }
        ret = true;
    }

 cleanup:
    priv->preMigrationState = VIR_DOMAIN_NOSTATE;
    return ret;
}

170 171

static int
172 173 174
qemuMigrationDstPrecreateDisk(virConnectPtr conn,
                              virDomainDiskDefPtr disk,
                              unsigned long long capacity)
175 176 177 178 179 180 181 182 183 184 185 186
{
    int ret = -1;
    virStoragePoolPtr pool = NULL;
    virStorageVolPtr vol = NULL;
    char *volName = NULL, *basePath = NULL;
    char *volStr = NULL;
    virBuffer buf = VIR_BUFFER_INITIALIZER;
    const char *format = NULL;
    unsigned int flags = 0;

    VIR_DEBUG("Precreate disk type=%s", virStorageTypeToString(disk->src->type));

187
    switch ((virStorageType)disk->src->type) {
188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207
    case VIR_STORAGE_TYPE_FILE:
        if (!virDomainDiskGetSource(disk)) {
            VIR_DEBUG("Dropping sourceless disk '%s'",
                      disk->dst);
            return 0;
        }

        if (VIR_STRDUP(basePath, disk->src->path) < 0)
            goto cleanup;

        if (!(volName = strrchr(basePath, '/'))) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("malformed disk path: %s"),
                           disk->src->path);
            goto cleanup;
        }

        *volName = '\0';
        volName++;

208
        if (!(pool = virStoragePoolLookupByTargetPath(conn, basePath)))
209 210 211 212 213 214 215 216 217 218 219 220 221 222 223
            goto cleanup;
        format = virStorageFileFormatTypeToString(disk->src->format);
        if (disk->src->format == VIR_STORAGE_FILE_QCOW2)
            flags |= VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA;
        break;

    case VIR_STORAGE_TYPE_VOLUME:
        if (!(pool = virStoragePoolLookupByName(conn, disk->src->srcpool->pool)))
            goto cleanup;
        format = virStorageFileFormatTypeToString(disk->src->format);
        volName = disk->src->srcpool->volume;
        if (disk->src->format == VIR_STORAGE_FILE_QCOW2)
            flags |= VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA;
        break;

224 225 226 227 228
    case VIR_STORAGE_TYPE_NETWORK:
        VIR_DEBUG("Skipping creation of network disk '%s'",
                  disk->dst);
        return 0;

229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275
    case VIR_STORAGE_TYPE_BLOCK:
    case VIR_STORAGE_TYPE_DIR:
    case VIR_STORAGE_TYPE_NONE:
    case VIR_STORAGE_TYPE_LAST:
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("cannot precreate storage for disk type '%s'"),
                       virStorageTypeToString(disk->src->type));
        goto cleanup;
    }

    if ((vol = virStorageVolLookupByName(pool, volName))) {
        VIR_DEBUG("Skipping creation of already existing volume of name '%s'",
                  volName);
        ret = 0;
        goto cleanup;
    }

    virBufferAddLit(&buf, "<volume>\n");
    virBufferAdjustIndent(&buf, 2);
    virBufferEscapeString(&buf, "<name>%s</name>\n", volName);
    virBufferAsprintf(&buf, "<capacity>%llu</capacity>\n", capacity);
    virBufferAddLit(&buf, "<target>\n");
    virBufferAdjustIndent(&buf, 2);
    virBufferAsprintf(&buf, "<format type='%s'/>\n", format);
    virBufferAdjustIndent(&buf, -2);
    virBufferAddLit(&buf, "</target>\n");
    virBufferAdjustIndent(&buf, -2);
    virBufferAddLit(&buf, "</volume>\n");

    if (!(volStr = virBufferContentAndReset(&buf))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("unable to create volume XML"));
        goto cleanup;
    }

    if (!(vol = virStorageVolCreateXML(pool, volStr, flags)))
        goto cleanup;

    ret = 0;
 cleanup:
    VIR_FREE(basePath);
    VIR_FREE(volStr);
    virObjectUnref(vol);
    virObjectUnref(pool);
    return ret;
}

276
static bool
277 278
qemuMigrationAnyCopyDisk(virDomainDiskDef const *disk,
                         size_t nmigrate_disks, const char **migrate_disks)
279 280 281 282 283 284 285 286 287 288 289 290 291 292 293
{
    size_t i;

    /* Check if the disk alias is in the list */
    if (nmigrate_disks) {
        for (i = 0; i < nmigrate_disks; i++) {
            if (STREQ(disk->dst, migrate_disks[i]))
                return true;
        }
        return false;
    }

    /* Default is to migrate only non-shared non-readonly disks
     * with source */
    return !disk->src->shared && !disk->src->readonly &&
294
           !virStorageSourceIsEmpty(disk->src);
295 296
}

297 298

static int
299 300 301 302 303
qemuMigrationDstPrecreateStorage(virDomainObjPtr vm,
                                 qemuMigrationCookieNBDPtr nbd,
                                 size_t nmigrate_disks,
                                 const char **migrate_disks,
                                 bool incremental)
304 305 306
{
    int ret = -1;
    size_t i = 0;
307
    virConnectPtr conn;
308 309 310 311

    if (!nbd || !nbd->ndisks)
        return 0;

312 313 314
    if (!(conn = virGetConnectStorage()))
        return -1;

315 316 317 318
    for (i = 0; i < nbd->ndisks; i++) {
        virDomainDiskDefPtr disk;
        const char *diskSrcPath;

319
        VIR_DEBUG("Looking up disk target '%s' (capacity=%llu)",
320 321
                  nbd->disks[i].target, nbd->disks[i].capacity);

322 323
        if (!(disk = virDomainDiskByName(vm->def, nbd->disks[i].target,
                                         false))) {
324 325 326 327 328 329 330 331
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("unable to find disk by target: %s"),
                           nbd->disks[i].target);
            goto cleanup;
        }

        diskSrcPath = virDomainDiskGetSource(disk);

332
        /* Skip disks we don't want to migrate and already existing disks. */
333
        if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks) ||
334 335 336 337
            (diskSrcPath && virFileExists(diskSrcPath))) {
            continue;
        }

338 339 340 341 342 343 344
        if (incremental) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("pre-creation of storage targets for incremental "
                             "storage migration is not supported"));
            goto cleanup;
        }

345 346
        VIR_DEBUG("Proceeding with disk source %s", NULLSTR(diskSrcPath));

347
        if (qemuMigrationDstPrecreateDisk(conn, disk, nbd->disks[i].capacity) < 0)
348 349 350 351 352
            goto cleanup;
    }

    ret = 0;
 cleanup:
353
    virObjectUnref(conn);
354 355 356 357
    return ret;
}


358
/**
359
 * qemuMigrationDstStartNBDServer:
360 361 362 363 364 365 366 367 368 369 370
 * @driver: qemu driver
 * @vm: domain
 *
 * Starts NBD server. This is a newer method to copy
 * storage during migration than using 'blk' and 'inc'
 * arguments in 'migrate' monitor command.
 * Error is reported here.
 *
 * Returns 0 on success, -1 otherwise.
 */
static int
371 372 373 374 375
qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               const char *listenAddr,
                               size_t nmigrate_disks,
                               const char **migrate_disks,
376 377
                               int nbdPort,
                               const char *tls_alias)
378 379 380 381 382 383
{
    int ret = -1;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned short port = 0;
    char *diskAlias = NULL;
    size_t i;
384 385 386 387
    virStorageNetHostDef server = {
        .name = (char *)listenAddr, /* cast away const */
        .transport = VIR_STORAGE_NET_HOST_TRANS_TCP,
    };
388

389 390 391 392 393 394
    if (nbdPort < 0 || nbdPort > USHRT_MAX) {
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("nbd port must be in range 0-65535"));
        return -1;
    }

395 396 397
    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];

398
        /* check whether disk should be migrated */
399
        if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
400 401
            continue;

402
        if (disk->src->readonly || virStorageSourceIsEmpty(disk->src)) {
403
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
404 405
                           _("Cannot migrate empty or read-only disk %s"),
                           disk->dst);
406 407 408
            goto cleanup;
        }

409
        VIR_FREE(diskAlias);
410
        if (!(diskAlias = qemuAliasDiskDriveFromDisk(disk)))
411 412 413 414 415 416
            goto cleanup;

        if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
            goto cleanup;

417 418 419 420 421 422
        if (port == 0) {
            if (nbdPort)
                port = nbdPort;
            else if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
                goto exit_monitor;

423 424
            server.port = port;
            if (qemuMonitorNBDServerStart(priv->mon, &server, tls_alias) < 0)
425
                goto exit_monitor;
426 427
        }

428
        if (qemuMonitorNBDServerAdd(priv->mon, diskAlias, NULL, true, NULL) < 0)
429 430
            goto exit_monitor;
        if (qemuDomainObjExitMonitor(driver, vm) < 0)
431 432 433 434 435 436
            goto cleanup;
    }

    priv->nbdPort = port;
    ret = 0;

437
 cleanup:
438
    VIR_FREE(diskAlias);
439
    if (ret < 0 && nbdPort == 0)
440
        virPortAllocatorRelease(port);
441
    return ret;
442 443 444 445

 exit_monitor:
    ignore_value(qemuDomainObjExitMonitor(driver, vm));
    goto cleanup;
446 447
}

448 449

static int
450 451 452
qemuMigrationDstStopNBDServer(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              qemuMigrationCookiePtr mig)
453 454 455 456 457 458 459 460 461 462 463 464 465 466 467
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (!mig->nbd)
        return 0;

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        return -1;

    if (qemuMonitorNBDServerStop(priv->mon) < 0)
        VIR_WARN("Unable to stop NBD server");
    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        return -1;

468
    virPortAllocatorRelease(priv->nbdPort);
469 470 471 472 473
    priv->nbdPort = 0;
    return 0;
}


474
static void
475 476
qemuMigrationNBDReportMirrorError(qemuBlockJobDataPtr job,
                                  const char *diskdst)
477
{
478
    if (job->errmsg) {
479 480
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("migration of disk %s failed: %s"),
481
                       diskdst, job->errmsg);
482 483
    } else {
        virReportError(VIR_ERR_OPERATION_FAILED,
484
                       _("migration of disk %s failed"), diskdst);
485 486 487 488
    }
}


489
/**
490
 * qemuMigrationSrcNBDStorageCopyReady:
491 492
 * @vm: domain
 *
493 494
 * Check the status of all drives copied via qemuMigrationSrcNBDStorageCopy.
 * Any pending block job events for the mirrored disks will be processed.
495 496 497 498 499 500
 *
 * Returns 1 if all mirrors are "ready",
 *         0 if some mirrors are still performing initial sync,
 *        -1 on error.
 */
static int
501 502
qemuMigrationSrcNBDStorageCopyReady(virDomainObjPtr vm,
                                    qemuDomainAsyncJob asyncJob)
503 504
{
    size_t i;
505 506
    size_t notReady = 0;
    int status;
507 508 509

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
510
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
511
        qemuBlockJobDataPtr job;
512

513
        if (!diskPriv->migrating)
514 515
            continue;

516 517 518 519 520 521 522
        if (!(job = qemuBlockJobDiskGetJob(disk))) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("missing block job data for disk '%s'"), disk->dst);
            return -1;
        }

        status = qemuBlockJobUpdate(vm, job, asyncJob);
523
        if (status == VIR_DOMAIN_BLOCK_JOB_FAILED) {
524 525
            qemuMigrationNBDReportMirrorError(job, disk->dst);
            virObjectUnref(job);
526 527
            return -1;
        }
528

529 530
        virObjectUnref(job);

531 532
        if (disk->mirrorState != VIR_DOMAIN_DISK_MIRROR_STATE_READY)
            notReady++;
533 534
    }

535 536 537 538 539 540 541
    if (notReady) {
        VIR_DEBUG("Waiting for %zu disk mirrors to get ready", notReady);
        return 0;
    } else {
        VIR_DEBUG("All disk mirrors are ready");
        return 1;
    }
542 543 544
}


545 546 547 548
/*
 * If @check is true, the function will report an error and return a different
 * code in case a block job fails. This way we can properly abort migration in
 * case some block jobs failed once all memory has already been transferred.
549
 *
550 551 552 553
 * Returns 1 if all mirrors are gone,
 *         0 if some mirrors are still active,
 *         -1 some mirrors failed but some are still active,
 *         -2 all mirrors are gone but some of them failed.
554 555
 */
static int
556 557 558
qemuMigrationSrcNBDCopyCancelled(virDomainObjPtr vm,
                                 qemuDomainAsyncJob asyncJob,
                                 bool check)
559
{
560 561
    size_t i;
    size_t active = 0;
562
    size_t completed = 0;
563 564
    int status;
    bool failed = false;
565

566
 retry:
567 568 569
    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
570
        qemuBlockJobDataPtr job;
571

572 573
        if (!diskPriv->migrating)
            continue;
574

575 576 577 578
        if (!(job = qemuBlockJobDiskGetJob(disk)))
            continue;

        status = qemuBlockJobUpdate(vm, job, asyncJob);
579 580 581
        switch (status) {
        case VIR_DOMAIN_BLOCK_JOB_FAILED:
            if (check) {
582
                qemuMigrationNBDReportMirrorError(job, disk->dst);
583
                failed = true;
584
            }
M
Marc Hartmayer 已提交
585
            ATTRIBUTE_FALLTHROUGH;
586 587 588 589
        case VIR_DOMAIN_BLOCK_JOB_CANCELED:
        case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
            diskPriv->migrating = false;
            break;
590

591 592
        default:
            active++;
593
        }
594 595 596

        if (status == VIR_DOMAIN_BLOCK_JOB_COMPLETED)
            completed++;
597 598

        virObjectUnref(job);
599 600 601 602 603 604 605 606 607 608
    }

    /* Updating completed block job drops the lock thus we have to recheck
     * block jobs for disks that reside before the disk(s) with completed
     * block job.
     */
    if (completed > 0) {
        completed = 0;
        active = 0;
        goto retry;
609
    }
610

611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626
    if (failed) {
        if (active) {
            VIR_DEBUG("Some disk mirrors failed; still waiting for %zu "
                      "disk mirrors to finish", active);
            return -1;
        } else {
            VIR_DEBUG("All disk mirrors are gone; some of them failed");
            return -2;
        }
    } else {
        if (active) {
            VIR_DEBUG("Waiting for %zu disk mirrors to finish", active);
            return 0;
        } else {
            VIR_DEBUG("All disk mirrors are gone");
            return 1;
627
        }
628
    }
629
}
630 631


632 633 634 635 636 637
/*
 * Returns 0 on success,
 *         1 when job is already completed or it failed and failNoJob is false,
 *         -1 on error or when job failed and failNoJob is true.
 */
static int
638 639 640
qemuMigrationSrcNBDCopyCancelOne(virQEMUDriverPtr driver,
                                 virDomainObjPtr vm,
                                 virDomainDiskDefPtr disk,
641
                                 qemuBlockJobDataPtr job,
642 643
                                 bool failNoJob,
                                 qemuDomainAsyncJob asyncJob)
644 645 646 647 648 649 650
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    char *diskAlias = NULL;
    int ret = -1;
    int status;
    int rv;

651
    status = qemuBlockJobUpdate(vm, job, asyncJob);
652 653 654 655
    switch (status) {
    case VIR_DOMAIN_BLOCK_JOB_FAILED:
    case VIR_DOMAIN_BLOCK_JOB_CANCELED:
        if (failNoJob) {
656
            qemuMigrationNBDReportMirrorError(job, disk->dst);
657
            goto cleanup;
658
        }
659
        ATTRIBUTE_FALLTHROUGH;
660
    case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
661 662
        ret = 1;
        goto cleanup;
663 664
    }

665
    if (!(diskAlias = qemuAliasDiskDriveFromDisk(disk)))
666 667
        return -1;

668
    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
669 670
        goto cleanup;

671
    rv = qemuMonitorBlockJobCancel(priv->mon, diskAlias);
672 673 674 675 676

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0)
        goto cleanup;

    ret = 0;
677 678 679 680 681 682 683 684

 cleanup:
    VIR_FREE(diskAlias);
    return ret;
}


/**
685
 * qemuMigrationSrcNBDCopyCancel:
686 687
 * @driver: qemu driver
 * @vm: domain
688
 * @check: if true report an error when some of the mirrors fails
689
 *
690
 * Cancel all drive-mirrors started by qemuMigrationSrcNBDStorageCopy.
691 692 693
 * Any pending block job events for the affected disks will be processed and
 * synchronous block job terminated regardless of return value unless qemu
 * has crashed.
694 695 696 697
 *
 * Returns 0 on success, -1 otherwise.
 */
static int
698 699 700 701 702
qemuMigrationSrcNBDCopyCancel(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              bool check,
                              qemuDomainAsyncJob asyncJob,
                              virConnectPtr dconn)
703
{
704
    virErrorPtr err = NULL;
705
    int ret = -1;
706
    size_t i;
707 708 709 710
    int rv;
    bool failed = false;

    VIR_DEBUG("Cancelling drive mirrors for domain %s", vm->def->name);
711 712 713

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
714
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
715
        qemuBlockJobDataPtr job;
716

717 718
        if (!(job = qemuBlockJobDiskGetJob(disk)) ||
            !qemuBlockJobIsRunning(job))
719 720
            diskPriv->migrating = false;

721 722
        if (!diskPriv->migrating) {
            virObjectUnref(job);
723
            continue;
724
        }
725

726
        rv = qemuMigrationSrcNBDCopyCancelOne(driver, vm, disk, job,
727
                                              check, asyncJob);
728 729 730 731 732 733
        if (rv != 0) {
            if (rv < 0) {
                if (!err)
                    err = virSaveLastError();
                failed = true;
            }
734
            qemuBlockJobSyncEnd(vm, job, asyncJob);
735 736
            diskPriv->migrating = false;
        }
737 738

        virObjectUnref(job);
739 740
    }

741
    while ((rv = qemuMigrationSrcNBDCopyCancelled(vm, asyncJob, check)) != 1) {
742 743 744 745 746 747 748
        if (check && !failed &&
            dconn && virConnectIsAlive(dconn) <= 0) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("Lost connection to destination host"));
            failed = true;
        }

749 750 751 752
        if (rv < 0) {
            failed = true;
            if (rv == -2)
                break;
753
        }
754

755 756 757 758 759
        if (failed && !err)
            err = virSaveLastError();

        if (virDomainObjWait(vm) < 0)
            goto cleanup;
760 761
    }

762 763 764 765 766 767 768 769 770
    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);

        if (!diskPriv->migrSource)
            continue;

        qemuBlockStorageSourceDetachOneBlockdev(driver, vm, asyncJob,
                                                diskPriv->migrSource);
771
        virObjectUnref(diskPriv->migrSource);
772 773 774
        diskPriv->migrSource = NULL;
    }

775 776 777
    ret = failed ? -1 : 0;

 cleanup:
778 779 780 781 782
    if (err) {
        virSetError(err);
        virFreeError(err);
    }
    return ret;
783 784 785
}


786 787 788 789 790 791 792 793
static int
qemuMigrationSrcNBDStorageCopyBlockdev(virQEMUDriverPtr driver,
                                       virDomainObjPtr vm,
                                       virDomainDiskDefPtr disk,
                                       const char *diskAlias,
                                       const char *host,
                                       int port,
                                       unsigned long long mirror_speed,
794
                                       unsigned int mirror_shallow,
795 796
                                       const char *tlsAlias)
{
797
    VIR_AUTOPTR(qemuBlockStorageSourceAttachData) data = NULL;
798 799
    qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
    int mon_ret = 0;
800
    VIR_AUTOUNREF(virStorageSourcePtr) copysrc = NULL;
801 802 803

    VIR_DEBUG("starting blockdev mirror for disk=%s to host=%s", diskAlias, host);

804
    if (!(copysrc = virStorageSourceNew()))
805
        return -1;
806 807 808 809 810

    copysrc->type = VIR_STORAGE_TYPE_NETWORK;
    copysrc->protocol = VIR_STORAGE_NET_PROTOCOL_NBD;
    copysrc->format = VIR_STORAGE_FILE_RAW;

811
    if (!(copysrc->backingStore = virStorageSourceNew()))
812
        return -1;
813 814

    if (VIR_STRDUP(copysrc->path, diskAlias) < 0)
815
        return -1;
816 817

    if (VIR_ALLOC_N(copysrc->hosts, 1) < 0)
818
        return -1;
819 820 821 822 823

    copysrc->nhosts = 1;
    copysrc->hosts->transport = VIR_STORAGE_NET_HOST_TRANS_TCP;
    copysrc->hosts->port = port;
    if (VIR_STRDUP(copysrc->hosts->name, host) < 0)
824
        return -1;
825 826

    if (VIR_STRDUP(copysrc->tlsAlias, tlsAlias) < 0)
827
        return -1;
828 829 830

    if (virAsprintf(&copysrc->nodestorage, "migration-%s-storage", disk->dst) < 0 ||
        virAsprintf(&copysrc->nodeformat, "migration-%s-format", disk->dst) < 0)
831
        return -1;
832

833 834
    /* Migration via blockdev-mirror was supported sooner than the auto-read-only
     * feature was added to qemu */
835 836 837
    if (!(data = qemuBlockStorageSourceAttachPrepareBlockdev(copysrc,
                                                             copysrc->backingStore,
                                                             false)))
838
        return -1;
839 840 841

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
842
        return -1;
843 844 845 846

    mon_ret = qemuBlockStorageSourceAttachApply(qemuDomainGetMonitor(vm), data);

    if (mon_ret == 0)
847
        mon_ret = qemuMonitorBlockdevMirror(qemuDomainGetMonitor(vm), NULL, false,
848
                                            diskAlias, copysrc->nodeformat,
849
                                            mirror_speed, 0, 0, mirror_shallow);
850 851 852 853 854

    if (mon_ret != 0)
        qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), data);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || mon_ret < 0)
855
        return -1;
856 857 858

    VIR_STEAL_PTR(diskPriv->migrSource, copysrc);

859
    return 0;
860 861 862
}


863 864 865 866 867 868 869
static int
qemuMigrationSrcNBDStorageCopyDriveMirror(virQEMUDriverPtr driver,
                                          virDomainObjPtr vm,
                                          const char *diskAlias,
                                          const char *host,
                                          int port,
                                          unsigned long long mirror_speed,
870
                                          bool mirror_shallow)
871
{
872
    VIR_AUTOFREE(char *) nbd_dest = NULL;
873 874 875 876 877
    int mon_ret;

    if (strchr(host, ':')) {
        if (virAsprintf(&nbd_dest, "nbd:[%s]:%d:exportname=%s",
                        host, port, diskAlias) < 0)
878
            return -1;
879 880 881
    } else {
        if (virAsprintf(&nbd_dest, "nbd:%s:%d:exportname=%s",
                        host, port, diskAlias) < 0)
882
            return -1;
883 884 885 886
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
887
        return -1;
888 889 890

    mon_ret = qemuMonitorDriveMirror(qemuDomainGetMonitor(vm),
                                     diskAlias, nbd_dest, "raw",
891
                                     mirror_speed, 0, 0, mirror_shallow, true);
892 893

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || mon_ret < 0)
894
        return -1;
895

896
    return 0;
897 898 899
}


900 901 902 903 904 905 906
static int
qemuMigrationSrcNBDStorageCopyOne(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  virDomainDiskDefPtr disk,
                                  const char *host,
                                  int port,
                                  unsigned long long mirror_speed,
907
                                  bool mirror_shallow,
908 909 910
                                  const char *tlsAlias,
                                  unsigned int flags)
{
911
    qemuDomainObjPrivatePtr priv = vm->privateData;
912
    qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
913
    qemuBlockJobDataPtr job = NULL;
914 915 916 917 918 919 920
    char *diskAlias = NULL;
    int rc;
    int ret = -1;

    if (!(diskAlias = qemuAliasDiskDriveFromDisk(disk)))
        goto cleanup;

921
    if (!(job = qemuBlockJobDiskNew(vm, disk, QEMU_BLOCKJOB_TYPE_COPY, diskAlias)))
922 923
        goto cleanup;

924
    qemuBlockJobSyncBegin(job);
925

926 927
    if (flags & VIR_MIGRATE_TLS ||
        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
928 929 930 931
        rc = qemuMigrationSrcNBDStorageCopyBlockdev(driver, vm,
                                                    disk, diskAlias,
                                                    host, port,
                                                    mirror_speed,
932
                                                    mirror_shallow,
933 934 935 936 937
                                                    tlsAlias);
    } else {
        rc = qemuMigrationSrcNBDStorageCopyDriveMirror(driver, vm, diskAlias,
                                                       host, port,
                                                       mirror_speed,
938
                                                       mirror_shallow);
939 940
    }

941
    if (rc < 0)
942 943 944
        goto cleanup;

    diskPriv->migrating = true;
945
    qemuBlockJobStarted(job, vm);
946 947 948 949

    ret = 0;

 cleanup:
950
    qemuBlockJobStartupFinalize(vm, job);
951 952 953 954 955
    VIR_FREE(diskAlias);
    return ret;
}


956
/**
957
 * qemuMigrationSrcNBDStorageCopy:
958 959 960 961
 * @driver: qemu driver
 * @vm: domain
 * @mig: migration cookie
 * @host: where are we migrating to
962
 * @speed: bandwidth limit in MiB/s
963 964
 * @migrate_flags: migrate monitor command flags
 *
965 966 967
 * Migrate non-shared storage using the NBD protocol to the server running
 * inside the qemu process on dst and wait until the copy converges.
 * On success update @migrate_flags so we don't tell 'migrate' command
968
 * to do the very same operation. On failure, the caller is
969 970
 * expected to call qemuMigrationSrcNBDCopyCancel to stop all
 * running copy operations.
971 972 973 974 975
 *
 * Returns 0 on success (@migrate_flags updated),
 *        -1 otherwise.
 */
static int
976 977 978 979 980 981 982 983
qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               qemuMigrationCookiePtr mig,
                               const char *host,
                               unsigned long speed,
                               unsigned int *migrate_flags,
                               size_t nmigrate_disks,
                               const char **migrate_disks,
984 985 986
                               virConnectPtr dconn,
                               const char *tlsAlias,
                               unsigned int flags)
987 988 989
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int port;
990
    size_t i;
991
    unsigned long long mirror_speed = speed;
992
    bool mirror_shallow = *migrate_flags & QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
993
    int rv;
994
    VIR_AUTOUNREF(virQEMUDriverConfigPtr) cfg = virQEMUDriverGetConfig(driver);
995 996

    VIR_DEBUG("Starting drive mirrors for domain %s", vm->def->name);
997

998 999 1000 1001
    if (mirror_speed > LLONG_MAX >> 20) {
        virReportError(VIR_ERR_OVERFLOW,
                       _("bandwidth must be less than %llu"),
                       LLONG_MAX >> 20);
1002
        return -1;
1003 1004 1005
    }
    mirror_speed <<= 20;

1006 1007 1008 1009 1010 1011 1012
    /* steal NBD port and thus prevent its propagation back to destination */
    port = mig->nbd->port;
    mig->nbd->port = 0;

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];

1013
        /* check whether disk should be migrated */
1014
        if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
1015 1016
            continue;

1017
        if (qemuMigrationSrcNBDStorageCopyOne(driver, vm, disk, host, port,
1018
                                              mirror_speed, mirror_shallow,
1019
                                              tlsAlias, flags) < 0)
1020
            return -1;
1021

1022
        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0) {
1023
            VIR_WARN("Failed to save status on vm %s", vm->def->name);
1024
            return -1;
1025
        }
1026
    }
1027

1028
    while ((rv = qemuMigrationSrcNBDStorageCopyReady(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) != 1) {
1029
        if (rv < 0)
1030
            return -1;
1031

1032
        if (priv->job.abortJob) {
1033
            priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
1034 1035 1036
            virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                           qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                           _("canceled by client"));
1037
            return -1;
1038
        }
1039

1040 1041 1042
        if (dconn && virConnectIsAlive(dconn) <= 0) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("Lost connection to destination host"));
1043
            return -1;
1044 1045
        }

1046
        if (virDomainObjWait(vm) < 0)
1047
            return -1;
1048 1049
    }

1050 1051
    qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                     priv->job.current);
1052

1053
    /* Okay, all disks are ready. Modify migrate_flags */
1054 1055 1056
    *migrate_flags &= ~(QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
                        QEMU_MONITOR_MIGRATE_NON_SHARED_INC);

1057
    return 0;
1058
}
1059

1060

1061
/**
1062
 * qemuMigrationSrcIsAllowedHostdev:
1063 1064
 * @def: domain definition
 *
1065
 * Checks that @def does not contain any host devices unsupported across
1066 1067 1068
 * migrations. Returns true if the vm is allowed to migrate.
 */
static bool
1069
qemuMigrationSrcIsAllowedHostdev(const virDomainDef *def)
1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088
{
    size_t i;

    /* Migration with USB host devices is allowed, all other devices are
     * forbidden. */
    for (i = 0; i < def->nhostdevs; i++) {
        virDomainHostdevDefPtr hostdev = def->hostdevs[i];
        if (hostdev->mode != VIR_DOMAIN_HOSTDEV_MODE_SUBSYS ||
            hostdev->source.subsys.type != VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("domain has assigned non-USB host devices"));
            return false;
        }
    }

    return true;
}


1089
/**
1090
 * qemuMigrationSrcIsAllowed:
1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102
 * @driver: qemu driver struct
 * @vm: domain object
 * @remote: migration is remote
 * @flags: migration flags (see struct virDomainMigrateFlags)
 *
 * Validates that the configuration of @vm can be migrated in various
 * situations. If @remote is true, the migration happens to remote host. @flags
 * is used to check various special migration types according to the request.
 *
 * Returns true if migration is supported. Reports libvirt error and returns
 * false otherwise.
 */
1103
bool
1104 1105 1106 1107
qemuMigrationSrcIsAllowed(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          bool remote,
                          unsigned int flags)
1108
{
1109
    qemuDomainObjPrivatePtr priv = vm->privateData;
1110
    int nsnapshots;
1111
    int pauseReason;
1112
    size_t i;
1113

1114 1115 1116 1117 1118
    /* perform these checks only when migrating to remote hosts */
    if (remote) {
        nsnapshots = virDomainSnapshotObjListNum(vm->snapshots, NULL, 0);
        if (nsnapshots < 0)
            return false;
1119

1120 1121 1122 1123 1124
        if (nsnapshots > 0) {
            virReportError(VIR_ERR_OPERATION_INVALID,
                           _("cannot migrate domain with %d snapshots"),
                           nsnapshots);
            return false;
1125
        }
1126

1127
        /* cancel migration if disk I/O error is emitted while migrating */
1128
        if (flags & VIR_MIGRATE_ABORT_ON_ERROR &&
1129
            !(flags & VIR_MIGRATE_OFFLINE) &&
1130 1131
            virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
            pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
E
Eric Blake 已提交
1132
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
1133
                           _("cannot migrate domain with I/O error"));
E
Eric Blake 已提交
1134 1135
            return false;
        }
1136
    }
1137

1138 1139 1140 1141 1142
    if (virHashSize(priv->dbusVMStates) > 0 &&
        !virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DBUS_VMSTATE)) {
        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                       _("domain requires dbus-vmstate support"));
        return false;
1143
    }
1144

1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155
    for (i = 0; i < vm->def->nnets; i++) {
        virDomainNetDefPtr net = vm->def->nets[i];
        qemuSlirpPtr slirp = QEMU_DOMAIN_NETWORK_PRIVATE(net)->slirp;

        if (slirp && !qemuSlirpHasFeature(slirp, QEMU_SLIRP_FEATURE_MIGRATE)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("a slirp-helper cannot be migrated"));
            return false;
        }
    }

1156 1157 1158 1159 1160 1161 1162
    /* following checks don't make sense for offline migration */
    if (!(flags & VIR_MIGRATE_OFFLINE)) {
        if (qemuProcessAutoDestroyActive(driver, vm)) {
            virReportError(VIR_ERR_OPERATION_INVALID,
                           "%s", _("domain is marked for auto destroy"));
            return false;
        }
1163

1164

1165 1166
        if (qemuDomainHasBlockjob(vm, false)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
1167
                           _("domain has active block job"));
1168 1169 1170
            return false;
        }

1171
        if (!qemuMigrationSrcIsAllowedHostdev(vm->def))
1172 1173
            return false;

1174
        if (vm->def->cpu) {
1175 1176 1177
            /* QEMU blocks migration and save with invariant TSC enabled
             * unless TSC frequency is explicitly set.
             */
1178 1179
            if (virCPUCheckFeature(vm->def->os.arch, vm->def->cpu,
                                   "invtsc") == 1) {
1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197
                bool block = true;

                for (i = 0; i < vm->def->clock.ntimers; i++) {
                    virDomainTimerDefPtr timer = vm->def->clock.timers[i];

                    if (timer->name == VIR_DOMAIN_TIMER_NAME_TSC &&
                        timer->frequency > 0) {
                        block = false;
                        break;
                    }
                }

                if (block) {
                    virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                                   _("domain has 'invtsc' CPU feature but "
                                     "TSC frequency is not specified"));
                    return false;
                }
1198
            }
J
Ján Tomko 已提交
1199 1200
        }

1201 1202 1203
        /* Verify that memory device config can be transferred reliably */
        for (i = 0; i < vm->def->nmems; i++) {
            virDomainMemoryDefPtr mem = vm->def->mems[i];
1204

1205 1206 1207 1208 1209
            if (mem->model == VIR_DOMAIN_MEMORY_MODEL_DIMM &&
                mem->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_DIMM) {
                virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                               _("domain's dimm info lacks slot ID "
                                 "or base address"));
1210

1211 1212
                return false;
            }
1213
        }
1214 1215 1216 1217 1218 1219

        if (vm->def->nshmems) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("migration with shmem device is not supported"));
            return false;
        }
1220 1221
    }

1222 1223 1224
    return true;
}

1225
static bool
1226
qemuMigrationSrcIsSafe(virDomainDefPtr def,
1227
                       virQEMUCapsPtr qemuCaps,
1228 1229 1230
                       size_t nmigrate_disks,
                       const char **migrate_disks,
                       unsigned int flags)
1231

1232
{
1233 1234
    bool storagemigration = flags & (VIR_MIGRATE_NON_SHARED_DISK |
                                     VIR_MIGRATE_NON_SHARED_INC);
1235
    size_t i;
1236
    int rc;
1237

1238
    for (i = 0; i < def->ndisks; i++) {
1239
        virDomainDiskDefPtr disk = def->disks[i];
1240
        const char *src = virDomainDiskGetSource(disk);
1241

1242 1243
        /* Disks without any source (i.e. floppies and CD-ROMs)
         * OR readonly are safe. */
1244
        if (virStorageSourceIsEmpty(disk->src) ||
1245
            disk->src->readonly)
1246
            continue;
1247

1248
        /* Disks which are migrated by qemu are safe too. */
1249
        if (storagemigration &&
1250
            qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
1251
            continue;
1252

1253
        /* However, disks on local FS (e.g. ext4) are not safe. */
1254
        if (virStorageSourceGetActualType(disk->src) == VIR_STORAGE_TYPE_FILE) {
1255
            if ((rc = virFileIsSharedFS(src)) < 0) {
1256
                return false;
1257 1258 1259 1260 1261
            } else if (rc == 0) {
                virReportError(VIR_ERR_MIGRATE_UNSAFE, "%s",
                               _("Migration without shared storage is unsafe"));
                return false;
            }
1262 1263 1264 1265
            if ((rc = virStorageFileIsClusterFS(src)) < 0)
                return false;
            else if (rc == 1)
                continue;
1266 1267
        } else if (virStorageSourceGetActualType(disk->src) == VIR_STORAGE_TYPE_NETWORK) {
            /* But network disks are safe again. */
1268
            continue;
1269
        }
1270

1271 1272 1273 1274 1275 1276 1277
        /* Our code elsewhere guarantees shared disks are either readonly (in
         * which case cache mode doesn't matter) or used with cache=none or used with cache=directsync */
        if (disk->src->shared ||
            disk->cachemode == VIR_DOMAIN_DISK_CACHE_DISABLE ||
            disk->cachemode == VIR_DOMAIN_DISK_CACHE_DIRECTSYNC)
            continue;

1278 1279 1280 1281 1282
        if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_MIGRATION_FILE_DROP_CACHE)) {
            VIR_DEBUG("QEMU supports flushing caches; migration is safe");
            continue;
        }

1283 1284
        virReportError(VIR_ERR_MIGRATE_UNSAFE, "%s",
                       _("Migration may lead to data corruption if disks"
1285
                         " use cache other than none or directsync"));
1286
        return false;
1287 1288 1289 1290 1291
    }

    return true;
}

1292 1293

void
1294 1295
qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver,
                               virDomainObjPtr vm)
1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315
{
    virDomainState state;
    int reason;

    state = virDomainObjGetState(vm, &reason);

    if (state != VIR_DOMAIN_PAUSED &&
        state != VIR_DOMAIN_RUNNING)
        return;

    if (state == VIR_DOMAIN_PAUSED &&
        reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED)
        return;

    VIR_WARN("Migration of domain %s failed during post-copy; "
             "leaving the domain paused", vm->def->name);

    if (state == VIR_DOMAIN_RUNNING) {
        if (qemuProcessStopCPUs(driver, vm,
                                VIR_DOMAIN_PAUSED_POSTCOPY_FAILED,
1316
                                QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
1317 1318 1319 1320 1321 1322 1323 1324
            VIR_WARN("Unable to pause guest CPUs for %s", vm->def->name);
    } else {
        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
                             VIR_DOMAIN_PAUSED_POSTCOPY_FAILED);
    }
}


1325
static int
1326
qemuMigrationSrcWaitForSpice(virDomainObjPtr vm)
1327 1328 1329
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

1330
    if (!priv->job.spiceMigration)
1331 1332
        return 0;

1333
    VIR_DEBUG("Waiting for SPICE to finish migration");
1334 1335
    while (!priv->job.spiceMigrated && !priv->job.abortJob) {
        if (virDomainObjWait(vm) < 0)
1336 1337 1338 1339
            return -1;
    }
    return 0;
}
1340

1341 1342 1343 1344

static void
qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo)
{
1345
    switch ((qemuMonitorMigrationStatus) jobInfo->stats.mig.status) {
1346 1347 1348 1349
    case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_POSTCOPY;
        break;

1350
    case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
1351
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED;
1352 1353 1354
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
1355
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_NONE;
1356 1357 1358
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
1359
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
1360 1361 1362
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
1363
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
1364 1365
        break;

1366 1367 1368 1369 1370 1371 1372 1373
    case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_PAUSED;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING;
        break;

1374 1375 1376
    case QEMU_MONITOR_MIGRATION_STATUS_SETUP:
    case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
    case QEMU_MONITOR_MIGRATION_STATUS_CANCELLING:
1377
    case QEMU_MONITOR_MIGRATION_STATUS_LAST:
1378 1379 1380 1381 1382 1383
        break;
    }
}


int
1384 1385 1386 1387 1388
qemuMigrationAnyFetchStats(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           qemuDomainAsyncJob asyncJob,
                           qemuDomainJobInfoPtr jobInfo,
                           char **error)
1389 1390
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
1391
    qemuMonitorMigrationStats stats;
1392 1393 1394 1395 1396
    int rv;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

1397
    rv = qemuMonitorGetMigrationStats(priv->mon, &stats, error);
1398 1399 1400 1401

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0)
        return -1;

1402
    jobInfo->stats.mig = stats;
1403 1404

    return 0;
1405 1406 1407
}


1408 1409 1410 1411 1412 1413 1414
static const char *
qemuMigrationJobName(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    switch (priv->job.asyncJob) {
    case QEMU_ASYNC_JOB_MIGRATION_OUT:
1415
        return _("migration out job");
1416 1417 1418 1419
    case QEMU_ASYNC_JOB_SAVE:
        return _("domain save job");
    case QEMU_ASYNC_JOB_DUMP:
        return _("domain core dump job");
1420 1421 1422 1423 1424 1425 1426 1427 1428
    case QEMU_ASYNC_JOB_NONE:
        return _("undefined");
    case QEMU_ASYNC_JOB_MIGRATION_IN:
        return _("migration in job");
    case QEMU_ASYNC_JOB_SNAPSHOT:
        return _("snapshot job");
    case QEMU_ASYNC_JOB_START:
        return _("start job");
    case QEMU_ASYNC_JOB_LAST:
1429 1430 1431 1432 1433 1434
    default:
        return _("job");
    }
}


1435
static int
1436
qemuMigrationJobCheckStatus(virQEMUDriverPtr driver,
1437
                            virDomainObjPtr vm,
1438
                            qemuDomainAsyncJob asyncJob)
1439 1440 1441
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = priv->job.current;
1442
    char *error = NULL;
1443
    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
1444
    int ret = -1;
1445

1446
    if (!events ||
1447
        jobInfo->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
1448
        if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, &error) < 0)
1449 1450
            return -1;
    }
1451

1452 1453
    qemuMigrationUpdateJobType(jobInfo);

1454 1455
    switch (jobInfo->status) {
    case QEMU_DOMAIN_JOB_STATUS_NONE:
1456 1457
        virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
                       qemuMigrationJobName(vm), _("is not active"));
1458
        goto cleanup;
1459

1460
    case QEMU_DOMAIN_JOB_STATUS_FAILED:
1461
        virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
1462 1463 1464
                       qemuMigrationJobName(vm),
                       error ? error : _("unexpectedly failed"));
        goto cleanup;
1465

1466
    case QEMU_DOMAIN_JOB_STATUS_CANCELED:
1467 1468
        virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                       qemuMigrationJobName(vm), _("canceled by client"));
1469
        goto cleanup;
1470

1471 1472
    case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
    case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
1473
    case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
1474
    case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
1475
    case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
1476
    case QEMU_DOMAIN_JOB_STATUS_PAUSED:
1477 1478
        break;
    }
1479 1480 1481 1482 1483 1484

    ret = 0;

 cleanup:
    VIR_FREE(error);
    return ret;
1485 1486 1487
}


1488 1489
enum qemuMigrationCompletedFlags {
    QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR = (1 << 0),
1490
    /* This flag should only be set when run on src host */
1491
    QEMU_MIGRATION_COMPLETED_CHECK_STORAGE  = (1 << 1),
1492
    QEMU_MIGRATION_COMPLETED_POSTCOPY       = (1 << 2),
1493
    QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER = (1 << 3),
1494 1495
};

1496

1497 1498 1499 1500 1501 1502 1503
/**
 * Returns 1 if migration completed successfully,
 *         0 if the domain is still being migrated,
 *         -1 migration failed,
 *         -2 something else failed, we need to cancel migration.
 */
static int
1504 1505 1506 1507 1508
qemuMigrationAnyCompleted(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          qemuDomainAsyncJob asyncJob,
                          virConnectPtr dconn,
                          unsigned int flags)
1509 1510 1511 1512 1513
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = priv->job.current;
    int pauseReason;

1514
    if (qemuMigrationJobCheckStatus(driver, vm, asyncJob) < 0)
1515 1516
        goto error;

1517
    /* This flag should only be set when run on src host */
1518
    if (flags & QEMU_MIGRATION_COMPLETED_CHECK_STORAGE &&
1519
        qemuMigrationSrcNBDStorageCopyReady(vm, asyncJob) < 0)
1520 1521
        goto error;

1522
    if (flags & QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR &&
1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535
        virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
        pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
        virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
                       qemuMigrationJobName(vm), _("failed due to I/O error"));
        goto error;
    }

    if (dconn && virConnectIsAlive(dconn) <= 0) {
        virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                       _("Lost connection to destination host"));
        goto error;
    }

1536 1537 1538 1539 1540 1541 1542 1543 1544 1545
    /* Migration was paused before serializing device state, let's return to
     * the caller so that it can finish all block jobs, resume migration, and
     * wait again for the real end of the migration.
     */
    if (flags & QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER &&
        jobInfo->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) {
        VIR_DEBUG("Migration paused before switchover");
        return 1;
    }

1546 1547 1548 1549 1550
    /* In case of postcopy the source considers migration completed at the
     * moment it switched from active to postcopy-active state. The destination
     * will continue waiting until the migrate state changes to completed.
     */
    if (flags & QEMU_MIGRATION_COMPLETED_POSTCOPY &&
1551
        jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) {
1552 1553 1554 1555
        VIR_DEBUG("Migration switched to post-copy");
        return 1;
    }

1556
    if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED)
1557 1558 1559 1560 1561
        return 1;
    else
        return 0;

 error:
1562 1563 1564
    switch (jobInfo->status) {
    case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
    case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
    case QEMU_DOMAIN_JOB_STATUS_PAUSED:
        /* The migration was aborted by us rather than QEMU itself. */
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
        return -2;

    case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
        /* Something failed after QEMU already finished the migration. */
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
        return -1;

    case QEMU_DOMAIN_JOB_STATUS_FAILED:
    case QEMU_DOMAIN_JOB_STATUS_CANCELED:
        /* QEMU aborted the migration. */
        return -1;

    case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
    case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
    case QEMU_DOMAIN_JOB_STATUS_NONE:
        /* Impossible. */
        break;
    }

    return -1;
}
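
/* A sketch of how the callers below consume the contract above (illustrative
 * only; the real loops add event handling or polling):
 *
 *   while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
 *                                          dconn, flags)) != 1) {
 *       if (rv < 0)
 *           return rv;    // -1: migration failed, -2: we need to cancel it
 *       // otherwise wait and re-check
 *   }
 */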


/* Returns 0 on success, -2 when migration needs to be cancelled, or -1 when
 * QEMU reports failed migration.
 */
static int
qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  qemuDomainAsyncJob asyncJob,
                                  virConnectPtr dconn,
                                  unsigned int flags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = priv->job.current;
    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
    int rv;

    jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING;

    while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
                                           dconn, flags)) != 1) {
        if (rv < 0)
            return rv;

        if (events) {
            if (virDomainObjWait(vm) < 0) {
                if (virDomainObjIsActive(vm))
                    jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
                return -2;
            }
        } else {
            /* Poll every 50ms for progress & to allow cancellation */
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };

            virObjectUnlock(vm);
            nanosleep(&ts, NULL);
            virObjectLock(vm);
        }
    }

    if (events)
        ignore_value(qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, NULL));

    qemuDomainJobInfoUpdateTime(jobInfo);
    qemuDomainJobInfoUpdateDowntime(jobInfo);
    VIR_FREE(priv->job.completed);
    if (VIR_ALLOC(priv->job.completed) == 0) {
        *priv->job.completed = *jobInfo;
        priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
    }

    if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT &&
        jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED)
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;

    return 0;
}


static int
qemuMigrationDstWaitForCompletion(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  qemuDomainAsyncJob asyncJob,
                                  bool postcopy)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned int flags = 0;
    int rv;

    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT))
        return 0;

    VIR_DEBUG("Waiting for incoming migration to complete");

    if (postcopy)
        flags = QEMU_MIGRATION_COMPLETED_POSTCOPY;

    while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
                                           NULL, flags)) != 1) {
        if (rv < 0 || virDomainObjWait(vm) < 0)
            return -1;
    }

    return 0;
}


static int
qemuMigrationSrcGraphicsRelocate(virQEMUDriverPtr driver,
                                 virDomainObjPtr vm,
                                 qemuMigrationCookiePtr cookie,
                                 const char *graphicsuri)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    const char *listenAddress = NULL;
    virSocketAddr addr;
    virURIPtr uri = NULL;
    int type = -1;
    int port = -1;
    int tlsPort = -1;
    const char *tlsSubject = NULL;

    if (!cookie || (!cookie->graphics && !graphicsuri))
        return 0;

    if (graphicsuri && !(uri = virURIParse(graphicsuri)))
        goto cleanup;

    if (cookie->graphics) {
        type = cookie->graphics->type;

        listenAddress = cookie->graphics->listen;

        if (!listenAddress ||
            (virSocketAddrParse(&addr, listenAddress, AF_UNSPEC) > 0 &&
             virSocketAddrIsWildcard(&addr)))
            listenAddress = cookie->remoteHostname;

        port = cookie->graphics->port;
        tlsPort = cookie->graphics->tlsPort;
        tlsSubject = cookie->graphics->tlsSubject;
    }

    if (uri) {
        size_t i;

        if ((type = virDomainGraphicsTypeFromString(uri->scheme)) < 0) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("unknown graphics type %s"), uri->scheme);
            goto cleanup;
        }

        if (uri->server)
            listenAddress = uri->server;
        if (uri->port > 0)
            port = uri->port;

        for (i = 0; i < uri->paramsCount; i++) {
            virURIParamPtr param = uri->params + i;

            if (STRCASEEQ(param->name, "tlsPort")) {
                if (virStrToLong_i(param->value, NULL, 10, &tlsPort) < 0) {
                    virReportError(VIR_ERR_INVALID_ARG,
                                   _("invalid tlsPort number: %s"),
                                   param->value);
                    goto cleanup;
                }
            } else if (STRCASEEQ(param->name, "tlsSubject")) {
                tlsSubject = param->value;
            }
        }
    }

    /* QEMU doesn't support VNC relocation yet, so
     * skip it to avoid generating an error
     */
    if (type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
        ret = 0;
        goto cleanup;
    }

    /* Older libvirt sends port == 0 for listen type='none' graphics. It's
     * safe to ignore such requests since relocation to unknown port does
     * not make sense in general.
     */
    if (port <= 0 && tlsPort <= 0) {
        ret = 0;
        goto cleanup;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
        ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress,
                                          port, tlsPort, tlsSubject);
        priv->job.spiceMigration = !ret;
        if (qemuDomainObjExitMonitor(driver, vm) < 0)
            ret = -1;
    }

 cleanup:
    virURIFree(uri);
    return ret;
}
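
/* Example of a graphicsuri accepted above (illustrative values only):
 *   spice://203.0.113.5:5900?tlsPort=5901&tlsSubject=CN=spice.example.org
 * The URI scheme selects the graphics type, server and port override the
 * values from the cookie, and tlsPort/tlsSubject come from query parameters.
 */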


static int
qemuMigrationDstOPDRelocate(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
                            virDomainObjPtr vm,
                            qemuMigrationCookiePtr cookie)
{
    virDomainNetDefPtr netptr;
    int ret = -1;
    size_t i;

    for (i = 0; i < cookie->network->nnets; i++) {
        netptr = vm->def->nets[i];

        switch (cookie->network->net[i].vporttype) {
        case VIR_NETDEV_VPORT_PROFILE_NONE:
        case VIR_NETDEV_VPORT_PROFILE_8021QBG:
        case VIR_NETDEV_VPORT_PROFILE_8021QBH:
           break;
        case VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH:
            if (virNetDevOpenvswitchSetMigrateData(cookie->network->net[i].portdata,
                                                   netptr->ifname) != 0) {
                virReportError(VIR_ERR_INTERNAL_ERROR,
                               _("Unable to run command to set OVS port data for "
                                 "interface %s"), netptr->ifname);
                goto cleanup;
            }
            break;
        default:
            break;
        }
    }

    ret = 0;
 cleanup:
    return ret;
}


int
qemuMigrationDstCheckProtocol(virQEMUCapsPtr qemuCaps,
                              const char *migrateFrom)
{
    if (STRPREFIX(migrateFrom, "rdma")) {
        if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_MIGRATE_RDMA)) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("incoming RDMA migration is not supported "
                             "with this QEMU binary"));
            return -1;
        }
    } else if (!STRPREFIX(migrateFrom, "tcp") &&
               !STRPREFIX(migrateFrom, "exec") &&
               !STRPREFIX(migrateFrom, "fd") &&
               !STRPREFIX(migrateFrom, "unix") &&
               STRNEQ(migrateFrom, "stdio")) {
        virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                       _("unknown migration protocol"));
        return -1;
    }

    return 0;
}
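
/* Illustrative migrateFrom values (assumed examples) that pass the check
 * above: "tcp:[::]:49152", "fd:7", "exec:cat", "unix:/some/path.sock" and
 * "stdio"; an "rdma:..." value additionally requires QEMU_CAPS_MIGRATE_RDMA.
 */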


char *
qemuMigrationDstGetURI(const char *migrateFrom,
                       int migrateFd)
{
    char *uri = NULL;

    if (STREQ(migrateFrom, "stdio"))
        ignore_value(virAsprintf(&uri, "fd:%d", migrateFd));
    else
        ignore_value(VIR_STRDUP(uri, migrateFrom));

    return uri;
}
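
/* Illustrative mapping performed above (assumed example values):
 *   migrateFrom = "stdio", migrateFd = 7  ->  "fd:7"
 *   migrateFrom = "tcp:[::]:49152"        ->  copied unchanged
 */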


int
qemuMigrationDstRun(virQEMUDriverPtr driver,
                    virDomainObjPtr vm,
                    const char *uri,
                    qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    int rv;

    VIR_DEBUG("Setting up incoming migration with URI %s", uri);

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    rv = qemuMonitorMigrateIncoming(priv->mon, uri);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0)
        goto cleanup;

    if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
        /* qemuMigrationDstWaitForCompletion is called from the Finish phase */
        ret = 0;
        goto cleanup;
    }

    if (qemuMigrationDstWaitForCompletion(driver, vm, asyncJob, false) < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    return ret;
}
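
/* Note (descriptive only): qemuMonitorMigrateIncoming() above hands the
 * deferred incoming URI to QEMU once the process is running; for
 * QEMU_ASYNC_JOB_MIGRATION_IN the wait for completion is intentionally left
 * to the Finish phase (see the early return above).
 */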


/* This is called for outgoing non-p2p migrations when a connection to the
 * client which initiated the migration was closed but we were waiting for it
 * to follow up with the next phase, that is, in between
 * qemuDomainMigrateBegin3 and qemuDomainMigratePerform3 or
 * qemuDomainMigratePerform3 and qemuDomainMigrateConfirm3.
 */
static void
qemuMigrationSrcCleanup(virDomainObjPtr vm,
                        virConnectPtr conn,
                        void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s",
              vm->def->name, conn,
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
              qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
                                              priv->job.phase));

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
        return;

    VIR_DEBUG("The connection which started outgoing migration of domain %s"
              " was closed; canceling the migration",
              vm->def->name);

    switch ((qemuMigrationJobPhase) priv->job.phase) {
    case QEMU_MIGRATION_PHASE_BEGIN3:
        /* just forget we were about to migrate */
        qemuDomainObjDiscardAsyncJob(driver, vm);
        break;

    case QEMU_MIGRATION_PHASE_PERFORM3_DONE:
        VIR_WARN("Migration of domain %s finished but we don't know if the"
                 " domain was successfully started on destination or not",
                 vm->def->name);
        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 priv->job.migParams, priv->job.apiFlags);
        /* clear the job and let higher levels decide what to do */
        qemuDomainObjDiscardAsyncJob(driver, vm);
        break;

    case QEMU_MIGRATION_PHASE_PERFORM3:
        /* cannot be seen without an active migration API; unreachable */
    case QEMU_MIGRATION_PHASE_CONFIRM3:
    case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
        /* all done; unreachable */
    case QEMU_MIGRATION_PHASE_PREPARE:
    case QEMU_MIGRATION_PHASE_FINISH2:
    case QEMU_MIGRATION_PHASE_FINISH3:
        /* incoming migration; unreachable */
    case QEMU_MIGRATION_PHASE_PERFORM2:
        /* single phase outgoing migration; unreachable */
    case QEMU_MIGRATION_PHASE_NONE:
    case QEMU_MIGRATION_PHASE_LAST:
        /* unreachable */
        ;
    }
}


/* The caller is supposed to lock the vm and start a migration job. */
static char *
qemuMigrationSrcBeginPhase(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           const char *xmlin,
                           const char *dname,
                           char **cookieout,
                           int *cookieoutlen,
                           size_t nmigrate_disks,
                           const char **migrate_disks,
                           unsigned long flags)
{
    char *rv = NULL;
    qemuMigrationCookiePtr mig = NULL;
    virDomainDefPtr def = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virCapsPtr caps = NULL;
    unsigned int cookieFlags = QEMU_MIGRATION_COOKIE_LOCKSTATE;

    VIR_DEBUG("driver=%p, vm=%p, xmlin=%s, dname=%s,"
              " cookieout=%p, cookieoutlen=%p,"
              " nmigrate_disks=%zu, migrate_disks=%p, flags=0x%lx",
              driver, vm, NULLSTR(xmlin), NULLSTR(dname),
              cookieout, cookieoutlen, nmigrate_disks,
              migrate_disks, flags);

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    /* Only set the phase if we are inside QEMU_ASYNC_JOB_MIGRATION_OUT.
     * Otherwise we will start the async job later in the perform phase losing
     * change protection.
     */
    if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
        qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_BEGIN3);

    if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
        goto cleanup;

    if (!(flags & (VIR_MIGRATE_UNSAFE | VIR_MIGRATE_OFFLINE)) &&
        !qemuMigrationSrcIsSafe(vm->def, priv->qemuCaps,
                                nmigrate_disks, migrate_disks, flags))
        goto cleanup;

    if (flags & VIR_MIGRATE_POSTCOPY &&
        (!(flags & VIR_MIGRATE_LIVE) ||
         flags & VIR_MIGRATE_PAUSED)) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy migration is not supported with non-live "
                         "or paused migration"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_POSTCOPY && flags & VIR_MIGRATE_TUNNELLED) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy is not supported with tunnelled migration"));
        goto cleanup;
    }

    if (flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC)) {
        if (nmigrate_disks) {
            size_t i, j;
            /* Check user requested only known disk targets. */
            for (i = 0; i < nmigrate_disks; i++) {
                for (j = 0; j < vm->def->ndisks; j++) {
                    if (STREQ(vm->def->disks[j]->dst, migrate_disks[i]))
                        break;
                }

                if (j == vm->def->ndisks) {
                    virReportError(VIR_ERR_INVALID_ARG,
                                   _("disk target %s not found"),
                                   migrate_disks[i]);
                    goto cleanup;
                }
            }

            if (flags & VIR_MIGRATE_TUNNELLED) {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("Selecting disks to migrate is not "
                                 "implemented for tunnelled migration"));
                goto cleanup;
            }
        }

        /* TODO support NBD for TUNNELLED migration */
        if (flags & VIR_MIGRATE_TUNNELLED) {
            VIR_WARN("NBD in tunnelled migration is currently not supported");
        } else {
            cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
            priv->nbdPort = 0;
        }
    }

    if (virDomainDefHasMemoryHotplug(vm->def) ||
        ((flags & VIR_MIGRATE_PERSIST_DEST) &&
         vm->newDef && virDomainDefHasMemoryHotplug(vm->newDef)))
        cookieFlags |= QEMU_MIGRATION_COOKIE_MEMORY_HOTPLUG;

    if (!qemuDomainVcpuHotplugIsInOrder(vm->def) ||
        ((flags & VIR_MIGRATE_PERSIST_DEST) &&
         vm->newDef && !qemuDomainVcpuHotplugIsInOrder(vm->newDef)))
        cookieFlags |= QEMU_MIGRATION_COOKIE_CPU_HOTPLUG;

    if (priv->origCPU)
        cookieFlags |= QEMU_MIGRATION_COOKIE_CPU;

    cookieFlags |= QEMU_MIGRATION_COOKIE_ALLOW_REBOOT;

    if (!(flags & VIR_MIGRATE_OFFLINE))
        cookieFlags |= QEMU_MIGRATION_COOKIE_CAPS;

    if (!(mig = qemuMigrationEatCookie(driver, vm->def,
                                       priv->origname, priv, NULL, 0, 0)))
        goto cleanup;

    if (qemuMigrationBakeCookie(mig, driver, vm,
                                QEMU_MIGRATION_SOURCE,
                                cookieout, cookieoutlen,
                                cookieFlags) < 0)
        goto cleanup;

    if (flags & VIR_MIGRATE_OFFLINE) {
        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
                     VIR_MIGRATE_NON_SHARED_INC)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration cannot handle "
                             "non-shared storage"));
            goto cleanup;
        }
        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration must be specified with "
                             "the persistent flag set"));
            goto cleanup;
        }
        if (flags & VIR_MIGRATE_TUNNELLED) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("tunnelled offline migration does not "
                             "make sense"));
            goto cleanup;
        }
    }

    if (xmlin) {
        if (!(def = virDomainDefParseString(xmlin, caps, driver->xmlopt, priv->qemuCaps,
                                            VIR_DOMAIN_DEF_PARSE_INACTIVE |
                                            VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE)))
            goto cleanup;

        if (!qemuDomainCheckABIStability(driver, vm, def))
            goto cleanup;

        rv = qemuDomainDefFormatLive(driver, priv->qemuCaps, def, NULL, false, true);
    } else {
        rv = qemuDomainDefFormatLive(driver, priv->qemuCaps, vm->def, priv->origCPU,
                                     false, true);
    }

 cleanup:
    qemuMigrationCookieFree(mig);
    virObjectUnref(caps);
    virDomainDefFree(def);
    return rv;
}

char *
qemuMigrationSrcBegin(virConnectPtr conn,
                      virDomainObjPtr vm,
                      const char *xmlin,
                      const char *dname,
                      char **cookieout,
                      int *cookieoutlen,
                      size_t nmigrate_disks,
                      const char **migrate_disks,
                      unsigned long flags)
{
    virQEMUDriverPtr driver = conn->privateData;
    char *xml = NULL;
    qemuDomainAsyncJob asyncJob;

    if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                  flags) < 0)
            goto cleanup;
        asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT;
    } else {
        if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
            goto cleanup;
        asyncJob = QEMU_ASYNC_JOB_NONE;
    }

    qemuMigrationSrcStoreDomainState(vm);

    if (!(flags & VIR_MIGRATE_OFFLINE) && virDomainObjCheckActive(vm) < 0)
        goto endjob;

    /* Check if there is any ejected media.
     * We don't want to require them on the destination.
     */
    if (!(flags & VIR_MIGRATE_OFFLINE) &&
        qemuProcessRefreshDisks(driver, vm, asyncJob) < 0)
        goto endjob;

    if (!(xml = qemuMigrationSrcBeginPhase(driver, vm, xmlin, dname,
                                           cookieout, cookieoutlen,
                                           nmigrate_disks, migrate_disks, flags)))
        goto endjob;

    if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        /* We keep the job active across API calls until the confirm() call.
         * This prevents any other APIs being invoked while migration is taking
         * place.
         */
        if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
                                 qemuMigrationSrcCleanup) < 0) {
            VIR_FREE(xml);
            goto endjob;
        }
        qemuMigrationJobContinue(vm);
    } else {
        goto endjob;
    }

 cleanup:
    virDomainObjEndAPI(&vm);
    return xml;

 endjob:
    if (flags & VIR_MIGRATE_CHANGE_PROTECTION)
        qemuMigrationJobFinish(driver, vm);
    else
        qemuDomainObjEndJob(driver, vm);
    goto cleanup;
}


/* Prepare is the first step, and it runs on the destination host.
 */

static void
qemuMigrationDstPrepareCleanup(virQEMUDriverPtr driver,
                               virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    VIR_DEBUG("driver=%p, vm=%s, job=%s, asyncJob=%s",
              driver,
              vm->def->name,
              qemuDomainJobTypeToString(priv->job.active),
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob));

    virPortAllocatorRelease(priv->migrationPort);
    priv->migrationPort = 0;

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN))
        return;
    qemuDomainObjDiscardAsyncJob(driver, vm);
}

static qemuProcessIncomingDefPtr
qemuMigrationDstPrepare(virDomainObjPtr vm,
                        bool tunnel,
                        const char *protocol,
                        const char *listenAddress,
                        unsigned short port,
                        int fd)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuProcessIncomingDefPtr inc = NULL;
    char *migrateFrom = NULL;

    if (tunnel) {
        if (VIR_STRDUP(migrateFrom, "stdio") < 0)
            goto cleanup;
    } else {
        bool encloseAddress = false;
        bool hostIPv6Capable = false;
        struct addrinfo *info = NULL;
        struct addrinfo hints = { .ai_flags = AI_ADDRCONFIG,
                                  .ai_socktype = SOCK_STREAM };
        const char *incFormat;

        if (getaddrinfo("::", NULL, &hints, &info) == 0) {
            freeaddrinfo(info);
            hostIPv6Capable = true;
        }

        if (listenAddress) {
            if (virSocketAddrNumericFamily(listenAddress) == AF_INET6) {
                if (!hostIPv6Capable) {
                    virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                                   _("host isn't capable of IPv6"));
                    goto cleanup;
                }
                /* IPv6 address must be escaped in brackets on the cmd line */
                encloseAddress = true;
            } else {
                /* listenAddress is a hostname or IPv4 */
            }
        } else if (hostIPv6Capable) {
            /* Listen on :: instead of 0.0.0.0 if QEMU understands it
             * and there is at least one IPv6 address configured
             */
            listenAddress = "::";
            encloseAddress = true;
        } else {
            listenAddress = "0.0.0.0";
        }

        /* QEMU will be started with
         *   -incoming protocol:[<IPv6 addr>]:port,
         *   -incoming protocol:<IPv4 addr>:port, or
         *   -incoming protocol:<hostname>:port
         */
        if (encloseAddress)
            incFormat = "%s:[%s]:%d";
        else
            incFormat = "%s:%s:%d";
        if (virAsprintf(&migrateFrom, incFormat,
                        protocol, listenAddress, port) < 0)
            goto cleanup;
    }

    inc = qemuProcessIncomingDefNew(priv->qemuCaps, listenAddress,
                                    migrateFrom, fd, NULL);

 cleanup:
    VIR_FREE(migrateFrom);
    return inc;
}

static int
qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
                           virConnectPtr dconn,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           virDomainDefPtr *def,
                           const char *origname,
                           virStreamPtr st,
                           const char *protocol,
                           unsigned short port,
                           bool autoPort,
                           const char *listenAddress,
                           size_t nmigrate_disks,
                           const char **migrate_disks,
                           int nbdPort,
                           qemuMigrationParamsPtr migParams,
                           unsigned long flags)
{
    virDomainObjPtr vm = NULL;
    virObjectEventPtr event = NULL;
    virErrorPtr origErr;
    int ret = -1;
    int dataFD[2] = { -1, -1 };
    qemuDomainObjPrivatePtr priv = NULL;
    qemuMigrationCookiePtr mig = NULL;
    bool tunnel = !!st;
    char *xmlout = NULL;
    unsigned int cookieFlags;
    unsigned int startFlags;
    virCapsPtr caps = NULL;
    qemuProcessIncomingDefPtr incoming = NULL;
    bool taint_hook = false;
    bool stopProcess = false;
    bool relabel = false;
    int rv;
    char *tlsAlias = NULL;

    virNWFilterReadLockFilterUpdates();

    if (flags & VIR_MIGRATE_OFFLINE) {
        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
                     VIR_MIGRATE_NON_SHARED_INC)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration cannot handle "
                             "non-shared storage"));
            goto cleanup;
        }
        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration must be specified with "
                             "the persistent flag set"));
            goto cleanup;
        }
        if (tunnel) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("tunnelled offline migration does not "
                             "make sense"));
            goto cleanup;
        }
        cookieFlags = 0;
    } else {
        cookieFlags = QEMU_MIGRATION_COOKIE_GRAPHICS |
                      QEMU_MIGRATION_COOKIE_CAPS;
    }

    if (flags & VIR_MIGRATE_POSTCOPY &&
        (!(flags & VIR_MIGRATE_LIVE) ||
         flags & VIR_MIGRATE_PAUSED)) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy migration is not supported with non-live "
                         "or paused migration"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_POSTCOPY && flags & VIR_MIGRATE_TUNNELLED) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy is not supported with tunnelled migration"));
        goto cleanup;
    }

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    if (!qemuMigrationSrcIsAllowedHostdev(*def))
        goto cleanup;

    /* Let migration hook filter domain XML */
    if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
        char *xml;
        int hookret;

        if (!(xml = qemuDomainDefFormatXML(driver, NULL, *def,
                                           VIR_DOMAIN_XML_SECURE |
                                           VIR_DOMAIN_XML_MIGRATABLE)))
            goto cleanup;

        hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, (*def)->name,
                              VIR_HOOK_QEMU_OP_MIGRATE, VIR_HOOK_SUBOP_BEGIN,
                              NULL, xml, &xmlout);
        VIR_FREE(xml);

        if (hookret < 0) {
            goto cleanup;
        } else if (hookret == 0) {
            if (virStringIsEmpty(xmlout)) {
                VIR_DEBUG("Migrate hook filter returned nothing; using the"
                          " original XML");
            } else {
                virDomainDefPtr newdef;

                VIR_DEBUG("Using hook-filtered domain XML: %s", xmlout);
                newdef = virDomainDefParseString(xmlout, caps, driver->xmlopt, NULL,
                                                 VIR_DOMAIN_DEF_PARSE_INACTIVE |
                                                 VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE);
                if (!newdef)
                    goto cleanup;

                if (!qemuDomainDefCheckABIStability(driver, NULL, *def, newdef)) {
                    virDomainDefFree(newdef);
                    goto cleanup;
                }

                virDomainDefFree(*def);
                *def = newdef;
                /* We should taint the domain here. However, @vm and therefore
                 * privateData too are still NULL, so just notice the fact and
                 * taint it later. */
                taint_hook = true;
            }
        }
    }

    /* Parse cookie earlier than adding the domain onto the
     * domain list. Parsing/validation may fail and there's no
     * point in having the domain in the list at that point. */
    if (!(mig = qemuMigrationEatCookie(driver, *def, origname, NULL,
                                       cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_LOCKSTATE |
                                       QEMU_MIGRATION_COOKIE_NBD |
                                       QEMU_MIGRATION_COOKIE_MEMORY_HOTPLUG |
                                       QEMU_MIGRATION_COOKIE_CPU_HOTPLUG |
                                       QEMU_MIGRATION_COOKIE_CPU |
                                       QEMU_MIGRATION_COOKIE_ALLOW_REBOOT |
                                       QEMU_MIGRATION_COOKIE_CAPS)))
        goto cleanup;

    if (!(vm = virDomainObjListAdd(driver->domains, *def,
                                   driver->xmlopt,
                                   VIR_DOMAIN_OBJ_LIST_ADD_LIVE |
                                   VIR_DOMAIN_OBJ_LIST_ADD_CHECK_LIVE,
                                   NULL)))
        goto cleanup;
    *def = NULL;

    priv = vm->privateData;
    if (VIR_STRDUP(priv->origname, origname) < 0)
        goto cleanup;

    if (taint_hook) {
        /* Domain XML has been altered by a hook script. */
        priv->hookRun = true;
    }

    if (STREQ_NULLABLE(protocol, "rdma") &&
        !virMemoryLimitIsSet(vm->def->mem.hard_limit)) {
        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                       _("cannot start RDMA migration with no memory hard "
                         "limit set"));
        goto cleanup;
    }

    if (qemuMigrationDstPrecreateStorage(vm, mig->nbd,
                                         nmigrate_disks, migrate_disks,
                                         !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
        goto cleanup;

    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                              flags) < 0)
        goto cleanup;
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PREPARE);

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    if (flags & VIR_MIGRATE_OFFLINE)
        goto done;

    if (tunnel &&
        (pipe(dataFD) < 0 || virSetCloseExec(dataFD[1]) < 0)) {
        virReportSystemError(errno, "%s",
                             _("cannot create pipe for tunnelled migration"));
        goto stopjob;
    }

    startFlags = VIR_QEMU_PROCESS_START_AUTODESTROY;

    if (qemuProcessInit(driver, vm, mig->cpu, QEMU_ASYNC_JOB_MIGRATION_IN,
                        true, startFlags) < 0)
        goto stopjob;
    stopProcess = true;

    priv->allowReboot = mig->allowReboot;

    if (!(incoming = qemuMigrationDstPrepare(vm, tunnel, protocol,
                                             listenAddress, port,
                                             dataFD[0])))
        goto stopjob;

    if (qemuProcessPrepareDomain(driver, vm, startFlags) < 0)
        goto stopjob;

    if (qemuProcessPrepareHost(driver, vm, startFlags) < 0)
        goto stopjob;

    rv = qemuProcessLaunch(dconn, driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                           incoming, NULL,
                           VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
                           startFlags);
    if (rv < 0) {
        if (rv == -2)
            relabel = true;
        goto stopjob;
    }
    relabel = true;

    if (tunnel) {
        if (virFDStreamOpen(st, dataFD[1]) < 0) {
            virReportSystemError(errno, "%s",
                                 _("cannot pass pipe for tunnelled migration"));
            goto stopjob;
        }
        dataFD[1] = -1; /* 'st' owns the FD now & will close it */
    }

    if (STREQ_NULLABLE(protocol, "rdma") &&
        virProcessSetMaxMemLock(vm->pid, vm->def->mem.hard_limit << 10) < 0) {
        goto stopjob;
    }

    if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                                 migParams, mig->caps->automatic) < 0)
        goto stopjob;

    /* Migrations using TLS need to add the "tls-creds-x509" object and
     * set the migration TLS parameters */
    if (flags & VIR_MIGRATE_TLS) {
        if (qemuMigrationParamsEnableTLS(driver, vm, true,
                                         QEMU_ASYNC_JOB_MIGRATION_IN,
                                         &tlsAlias, NULL,
                                         migParams) < 0)
            goto stopjob;
    } else {
        if (qemuMigrationParamsDisableTLS(vm, migParams) < 0)
            goto stopjob;
    }

    if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                                 migParams) < 0)
        goto stopjob;

    if (mig->nbd &&
        flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC) &&
        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_SERVER)) {
        const char *nbdTLSAlias = NULL;

        if (flags & VIR_MIGRATE_TLS) {
            if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_TLS)) {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("QEMU NBD server does not support TLS transport"));
                goto stopjob;
            }

            nbdTLSAlias = tlsAlias;
        }

        if (qemuMigrationDstStartNBDServer(driver, vm, incoming->address,
                                           nmigrate_disks, migrate_disks,
                                           nbdPort, nbdTLSAlias) < 0) {
            goto stopjob;
        }
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (mig->lockState) {
        VIR_DEBUG("Received lockstate %s", mig->lockState);
        VIR_FREE(priv->lockState);
        priv->lockState = mig->lockState;
        mig->lockState = NULL;
    } else {
        VIR_DEBUG("Received no lockstate");
    }

    if (incoming->deferredURI &&
        qemuMigrationDstRun(driver, vm, incoming->deferredURI,
                            QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        goto stopjob;

    if (qemuProcessFinishStartup(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                                 false, VIR_DOMAIN_PAUSED_MIGRATION) < 0)
        goto stopjob;

 done:
    if (qemuMigrationBakeCookie(mig, driver, vm,
                                QEMU_MIGRATION_DESTINATION,
                                cookieout, cookieoutlen, cookieFlags) < 0) {
        /* We could tear down the whole guest here, but
         * cookie data is (so far) non-critical, so that
         * seems a little harsh. We'll just warn for now.
         */
        VIR_WARN("Unable to encode migration cookie");
    }

    if (qemuDomainCleanupAdd(vm, qemuMigrationDstPrepareCleanup) < 0)
        goto stopjob;

    if (!(flags & VIR_MIGRATE_OFFLINE)) {
        virDomainAuditStart(vm, "migrated", true);
        event = virDomainEventLifecycleNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STARTED,
                                         VIR_DOMAIN_EVENT_STARTED_MIGRATED);
    }

    /* We keep the job active across API calls until the finish() call.
     * This prevents any other APIs being invoked while incoming
     * migration is taking place.
     */
    qemuMigrationJobContinue(vm);

    if (autoPort)
        priv->migrationPort = port;
    /* in this case port is not auto selected and we don't need to manage it
     * anymore after cookie is baked
     */
    if (nbdPort != 0)
        priv->nbdPort = 0;
    ret = 0;

 cleanup:
    virErrorPreserveLast(&origErr);
    VIR_FREE(tlsAlias);
    qemuProcessIncomingDefFree(incoming);
    VIR_FREE(xmlout);
    VIR_FORCE_CLOSE(dataFD[0]);
    VIR_FORCE_CLOSE(dataFD[1]);
    if (ret < 0 && priv) {
        /* priv is set right after vm is added to the list of domains
         * and there is no 'goto cleanup;' in the middle of those */
        VIR_FREE(priv->origname);
        /* release if port is auto selected which is not the case if
         * it is given in parameters
         */
        if (nbdPort == 0)
            virPortAllocatorRelease(priv->nbdPort);
        priv->nbdPort = 0;
        virDomainObjRemoveTransientDef(vm);
        qemuDomainRemoveInactiveJob(driver, vm);
    }
    virDomainObjEndAPI(&vm);
    virObjectEventStateQueue(driver->domainEventState, event);
    qemuMigrationCookieFree(mig);
    virObjectUnref(caps);
    virNWFilterUnlockFilterUpdates();
    virErrorRestore(&origErr);
    return ret;

 stopjob:
    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                             priv->job.migParams, priv->job.apiFlags);

    if (stopProcess) {
        unsigned int stopFlags = VIR_QEMU_PROCESS_STOP_MIGRATED;
        if (!relabel)
            stopFlags |= VIR_QEMU_PROCESS_STOP_NO_RELABEL;
        virDomainAuditStart(vm, "migrated", false);
        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                        QEMU_ASYNC_JOB_MIGRATION_IN, stopFlags);
    }

    qemuMigrationJobFinish(driver, vm);
    goto cleanup;
}


/*
 * This version starts an empty VM listening on a localhost TCP port, and
 * sets up the corresponding virStream to handle the incoming data.
 */
int
qemuMigrationDstPrepareTunnel(virQEMUDriverPtr driver,
                              virConnectPtr dconn,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              virStreamPtr st,
                              virDomainDefPtr *def,
                              const char *origname,
                              qemuMigrationParamsPtr migParams,
                              unsigned long flags)
{
    VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, st=%p, def=%p, "
              "origname=%s, flags=0x%lx",
              driver, dconn, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, st, *def, origname, flags);

    if (st == NULL) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("tunnelled migration requested but NULL stream passed"));
        return -1;
    }

    return qemuMigrationDstPrepareAny(driver, dconn, cookiein, cookieinlen,
                                      cookieout, cookieoutlen, def, origname,
                                      st, NULL, 0, false, NULL, 0, NULL, 0,
                                      migParams, flags);
}


static virURIPtr
qemuMigrationAnyParseURI(const char *uri, bool *wellFormed)
{
    char *tmp = NULL;
    virURIPtr parsed;

    /* For compatibility reasons tcp://... URIs are sent as tcp:...
     * We need to transform them to a well-formed URI before parsing. */
    if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri + 4, "//")) {
        if (virAsprintf(&tmp, "tcp://%s", uri + 4) < 0)
            return NULL;
        uri = tmp;
    }

    parsed = virURIParse(uri);
    if (parsed && wellFormed)
        *wellFormed = !tmp;
    VIR_FREE(tmp);

    return parsed;
}
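
/* Illustrative behaviour of the parser above (assumed example values):
 *   "tcp:host.example.org:49152"   -> parsed as tcp://host.example.org:49152,
 *                                     *wellFormed set to false
 *   "tcp://host.example.org:49152" -> parsed as-is, *wellFormed set to true
 */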


int
qemuMigrationDstPrepareDirect(virQEMUDriverPtr driver,
                              virConnectPtr dconn,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              const char *uri_in,
                              char **uri_out,
                              virDomainDefPtr *def,
                              const char *origname,
                              const char *listenAddress,
                              size_t nmigrate_disks,
                              const char **migrate_disks,
                              int nbdPort,
                              qemuMigrationParamsPtr migParams,
                              unsigned long flags)
{
    unsigned short port = 0;
    bool autoPort = true;
    char *hostname = NULL;
    int ret = -1;
    virURIPtr uri = NULL;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    const char *migrateHost = cfg->migrateHost;

    VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, uri_in=%s, uri_out=%p, "
              "def=%p, origname=%s, listenAddress=%s, "
              "nmigrate_disks=%zu, migrate_disks=%p, nbdPort=%d, flags=0x%lx",
              driver, dconn, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, NULLSTR(uri_in), uri_out,
              *def, origname, NULLSTR(listenAddress),
              nmigrate_disks, migrate_disks, nbdPort, flags);

    *uri_out = NULL;

    /* The URI passed in may be NULL or a string "tcp://somehostname:port".
     *
     * If the URI passed in is NULL then we allocate a port number
     * from our pool of port numbers, and if the migrateHost is configured,
     * we return a URI of "tcp://migrateHost:port", otherwise return a URI
     * of "tcp://ourhostname:port".
     *
     * If the URI passed in is not NULL then we try to parse out the
     * port number and use that (note that the hostname is assumed
     * to be a correct hostname which refers to the target machine).
     */
    if (uri_in == NULL) {
        bool encloseAddress = false;
        const char *incFormat;

        if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
            goto cleanup;

        if (migrateHost != NULL) {
            if (virSocketAddrNumericFamily(migrateHost) == AF_INET6)
                encloseAddress = true;

            if (VIR_STRDUP(hostname, migrateHost) < 0)
                goto cleanup;
        } else {
            if ((hostname = virGetHostname()) == NULL)
                goto cleanup;
        }

        if (STRPREFIX(hostname, "localhost")) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("hostname on destination resolved to localhost,"
                             " but migration requires an FQDN"));
            goto cleanup;
        }

        /* XXX this really should have been a properly well-formed
         * URI, but we can't add in tcp:// now without breaking
         * compatibility with old targets. We at least make the
         * new targets accept both syntaxes though.
         */
        if (encloseAddress)
            incFormat = "%s:[%s]:%d";
        else
            incFormat = "%s:%s:%d";

        if (virAsprintf(uri_out, incFormat, "tcp", hostname, port) < 0)
            goto cleanup;
    } else {
        bool well_formed_uri;

        if (!(uri = qemuMigrationAnyParseURI(uri_in, &well_formed_uri)))
            goto cleanup;

        if (uri->scheme == NULL) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("missing scheme in migration URI: %s"),
                           uri_in);
            goto cleanup;
        }

        if (STRNEQ(uri->scheme, "tcp") &&
            STRNEQ(uri->scheme, "rdma")) {
            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED,
                           _("unsupported scheme %s in migration URI %s"),
                           uri->scheme, uri_in);
            goto cleanup;
        }

        if (uri->server == NULL) {
            virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration"
                                                  " URI: %s"), uri_in);
            goto cleanup;
        }

        if (uri->port == 0) {
            if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
                goto cleanup;

            /* Send well-formed URI only if uri_in was well-formed */
            if (well_formed_uri) {
                uri->port = port;
                if (!(*uri_out = virURIFormat(uri)))
                    goto cleanup;
            } else {
                if (virAsprintf(uri_out, "%s:%d", uri_in, port) < 0)
                    goto cleanup;
            }
        } else {
            port = uri->port;
            autoPort = false;
        }
    }

    if (*uri_out)
        VIR_DEBUG("Generated uri_out=%s", *uri_out);

    ret = qemuMigrationDstPrepareAny(driver, dconn, cookiein, cookieinlen,
                                     cookieout, cookieoutlen, def, origname,
                                     NULL, uri ? uri->scheme : "tcp",
                                     port, autoPort, listenAddress,
                                     nmigrate_disks, migrate_disks, nbdPort,
                                     migParams, flags);
 cleanup:
    virURIFree(uri);
    VIR_FREE(hostname);
    virObjectUnref(cfg);
    if (ret != 0) {
        VIR_FREE(*uri_out);
        if (autoPort)
            virPortAllocatorRelease(port);
    }
    return ret;
}


virDomainDefPtr
qemuMigrationAnyPrepareDef(virQEMUDriverPtr driver,
                           virQEMUCapsPtr qemuCaps,
                           const char *dom_xml,
                           const char *dname,
                           char **origname)
{
    virCapsPtr caps = NULL;
    virDomainDefPtr def;
    char *name = NULL;

    if (!dom_xml) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("no domain XML passed"));
        return NULL;
    }

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        return NULL;

    if (!(def = virDomainDefParseString(dom_xml, caps, driver->xmlopt,
                                        qemuCaps,
                                        VIR_DOMAIN_DEF_PARSE_INACTIVE |
                                        VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE)))
        goto cleanup;

    if (dname) {
        name = def->name;
        if (VIR_STRDUP(def->name, dname) < 0) {
            virDomainDefFree(def);
            def = NULL;
        }
    }

 cleanup:
    virObjectUnref(caps);
    if (def && origname)
        *origname = name;
    else
        VIR_FREE(name);
    return def;
}


static int
qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver,
                             virDomainObjPtr vm,
                             const char *cookiein,
                             int cookieinlen,
                             unsigned int flags,
                             int retcode)
{
    qemuMigrationCookiePtr mig;
    virObjectEventPtr event;
    int rv = -1;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = NULL;

    VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "flags=0x%x, retcode=%d",
              driver, vm, NULLSTR(cookiein), cookieinlen,
              flags, retcode);

    virCheckFlags(QEMU_MIGRATION_FLAGS, -1);

    qemuMigrationJobSetPhase(driver, vm,
                             retcode == 0
                             ? QEMU_MIGRATION_PHASE_CONFIRM3
                             : QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED);

    if (!(mig = qemuMigrationEatCookie(driver, vm->def, priv->origname, priv,
                                       cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_STATS)))
        goto cleanup;

    if (retcode == 0)
        jobInfo = priv->job.completed;
    else
        VIR_FREE(priv->job.completed);

    /* Update times with the values sent by the destination daemon */
    if (mig->jobInfo && jobInfo) {
        int reason;

        /* We need to refresh migration statistics after a completed post-copy
         * migration since priv->job.completed contains obsolete data from the
         * time we switched to post-copy mode.
         */
        if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
            reason == VIR_DOMAIN_PAUSED_POSTCOPY &&
            qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                       jobInfo, NULL) < 0)
            VIR_WARN("Could not refresh migration statistics");

        qemuDomainJobInfoUpdateTime(jobInfo);
        jobInfo->timeDeltaSet = mig->jobInfo->timeDeltaSet;
        jobInfo->timeDelta = mig->jobInfo->timeDelta;
        jobInfo->stats.mig.downtime_set = mig->jobInfo->stats.mig.downtime_set;
        jobInfo->stats.mig.downtime = mig->jobInfo->stats.mig.downtime;
    }

    if (flags & VIR_MIGRATE_OFFLINE)
        goto done;

    /* Did the migration go as planned?  If yes, kill off the domain object.
     * If something failed, resume CPUs, but only if we didn't use post-copy.
     */
    if (retcode == 0) {
        /* If guest uses SPICE and supports seamless migration we have to hold
         * up domain shutdown until SPICE server transfers its data */
2990
        qemuMigrationSrcWaitForSpice(vm);
2991 2992

        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
2993
                        QEMU_ASYNC_JOB_MIGRATION_OUT,
2994 2995 2996
                        VIR_QEMU_PROCESS_STOP_MIGRATED);
        virDomainAuditStop(vm, "migrated");

2997
        event = virDomainEventLifecycleNewFromObj(vm,
2998 2999
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
3000
        virObjectEventStateQueue(driver->domainEventState, event);
3001
        qemuDomainEventEmitJobCompleted(driver, vm);
3002
    } else {
3003
        virErrorPtr orig_err = virSaveLastError();
3004
        int reason;
3005 3006

        /* cancel any outstanding NBD jobs */
3007 3008
        qemuMigrationSrcNBDCopyCancel(driver, vm, false,
                                      QEMU_ASYNC_JOB_MIGRATION_OUT, NULL);
3009 3010 3011

        virSetError(orig_err);
        virFreeError(orig_err);
3012

3013
        if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
3014
            reason == VIR_DOMAIN_PAUSED_POSTCOPY)
3015
            qemuMigrationAnyPostcopyFailed(driver, vm);
3016 3017
        else
            qemuMigrationSrcRestoreDomainState(driver, vm);
3018

3019
        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
3020
                                 priv->job.migParams, priv->job.apiFlags);
3021

3022
        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
3023 3024 3025
            VIR_WARN("Failed to save status on vm %s", vm->def->name);
    }

3026
 done:
3027 3028 3029
    qemuMigrationCookieFree(mig);
    rv = 0;

3030
 cleanup:
3031 3032 3033 3034 3035
    virObjectUnref(cfg);
    return rv;
}

int
3036 3037 3038 3039 3040 3041
qemuMigrationSrcConfirm(virQEMUDriverPtr driver,
                        virDomainObjPtr vm,
                        const char *cookiein,
                        int cookieinlen,
                        unsigned int flags,
                        int cancelled)
3042
{
3043
    qemuMigrationJobPhase phase;
3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057
    virQEMUDriverConfigPtr cfg = NULL;
    int ret = -1;

    cfg = virQEMUDriverGetConfig(driver);

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
        goto cleanup;

    if (cancelled)
        phase = QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED;
    else
        phase = QEMU_MIGRATION_PHASE_CONFIRM3;

    qemuMigrationJobStartPhase(driver, vm, phase);
3058
    virCloseCallbacksUnset(driver->closeCallbacks, vm,
3059
                           qemuMigrationSrcCleanup);
3060

3061 3062 3063
    ret = qemuMigrationSrcConfirmPhase(driver, vm,
                                       cookiein, cookieinlen,
                                       flags, cancelled);
3064

3065
    qemuMigrationJobFinish(driver, vm);
3066
    if (!virDomainObjIsActive(vm)) {
3067
        if (!cancelled && ret == 0 && flags & VIR_MIGRATE_UNDEFINE_SOURCE) {
3068
            virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
3069 3070
            vm->persistent = 0;
        }
3071
        qemuDomainRemoveInactiveJob(driver, vm);
3072 3073
    }

3074
 cleanup:
M
Michal Privoznik 已提交
3075
    virDomainObjEndAPI(&vm);
3076 3077 3078 3079 3080
    virObjectUnref(cfg);
    return ret;
}


3081 3082
enum qemuMigrationDestinationType {
    MIGRATION_DEST_HOST,
3083
    MIGRATION_DEST_CONNECT_HOST,
3084
    MIGRATION_DEST_FD,
3085
};
3086

3087 3088 3089 3090
enum qemuMigrationForwardType {
    MIGRATION_FWD_DIRECT,
    MIGRATION_FWD_STREAM,
};
3091

3092 3093 3094 3095 3096 3097
typedef struct _qemuMigrationSpec qemuMigrationSpec;
typedef qemuMigrationSpec *qemuMigrationSpecPtr;
struct _qemuMigrationSpec {
    enum qemuMigrationDestinationType destType;
    union {
        struct {
3098
            const char *protocol;
3099 3100 3101 3102
            const char *name;
            int port;
        } host;

3103 3104 3105 3106
        struct {
            int qemu;
            int local;
        } fd;
3107 3108 3109 3110 3111 3112 3113
    } dest;

    enum qemuMigrationForwardType fwdType;
    union {
        virStreamPtr stream;
    } fwd;
};
3114 3115 3116

#define TUNNEL_SEND_BUF_SIZE 65536

3117 3118 3119 3120 3121 3122 3123
typedef struct _qemuMigrationIOThread qemuMigrationIOThread;
typedef qemuMigrationIOThread *qemuMigrationIOThreadPtr;
struct _qemuMigrationIOThread {
    virThread thread;
    virStreamPtr st;
    int sock;
    virError err;
3124 3125
    int wakeupRecvFD;
    int wakeupSendFD;
3126 3127
};

3128
static void qemuMigrationSrcIOFunc(void *arg)
3129
{
3130
    qemuMigrationIOThreadPtr data = arg;
3131 3132 3133 3134 3135 3136 3137
    char *buffer = NULL;
    struct pollfd fds[2];
    int timeout = -1;
    virErrorPtr err = NULL;

    VIR_DEBUG("Running migration tunnel; stream=%p, sock=%d",
              data->st, data->sock);
3138

3139
    if (VIR_ALLOC_N(buffer, TUNNEL_SEND_BUF_SIZE) < 0)
3140
        goto abrt;
3141

3142 3143 3144
    fds[0].fd = data->sock;
    fds[1].fd = data->wakeupRecvFD;

3145
    for (;;) {
3146 3147 3148 3149 3150 3151 3152 3153 3154 3155
        int ret;

        fds[0].events = fds[1].events = POLLIN;
        fds[0].revents = fds[1].revents = 0;

        ret = poll(fds, ARRAY_CARDINALITY(fds), timeout);

        if (ret < 0) {
            if (errno == EAGAIN || errno == EINTR)
                continue;
3156
            virReportSystemError(errno, "%s",
3157 3158
                                 _("poll failed in migration tunnel"));
            goto abrt;
3159
        }
3160 3161 3162 3163 3164 3165 3166

        if (ret == 0) {
            /* We were asked to gracefully stop but reading would block. This
             * can only happen if qemu told us migration finished but didn't
             * close the migration fd. We handle this in the same way as EOF.
             */
            VIR_DEBUG("QEMU forgot to close migration fd");
3167
            break;
3168
        }
3169

3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185
        if (fds[1].revents & (POLLIN | POLLERR | POLLHUP)) {
            char stop = 0;

            if (saferead(data->wakeupRecvFD, &stop, 1) != 1) {
                virReportSystemError(errno, "%s",
                                     _("failed to read from wakeup fd"));
                goto abrt;
            }

            VIR_DEBUG("Migration tunnel was asked to %s",
                      stop ? "abort" : "finish");
            if (stop) {
                goto abrt;
            } else {
                timeout = 0;
            }
3186 3187
        }

3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204
        if (fds[0].revents & (POLLIN | POLLERR | POLLHUP)) {
            int nbytes;

            nbytes = saferead(data->sock, buffer, TUNNEL_SEND_BUF_SIZE);
            if (nbytes > 0) {
                if (virStreamSend(data->st, buffer, nbytes) < 0)
                    goto error;
            } else if (nbytes < 0) {
                virReportSystemError(errno, "%s",
                        _("tunnelled migration failed to read from qemu"));
                goto abrt;
            } else {
                /* EOF; get out of here */
                break;
            }
        }
    }
3205

3206 3207
    if (virStreamFinish(data->st) < 0)
        goto error;
3208

3209
    VIR_FORCE_CLOSE(data->sock);
3210 3211
    VIR_FREE(buffer);

3212 3213
    return;

3214
 abrt:
3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225
    err = virSaveLastError();
    if (err && err->code == VIR_ERR_OK) {
        virFreeError(err);
        err = NULL;
    }
    virStreamAbort(data->st);
    if (err) {
        virSetError(err);
        virFreeError(err);
    }

3226
 error:
3227 3228 3229 3230 3231
    /* Let the source qemu know that the transfer cant continue anymore.
     * Don't copy the error for EPIPE as destination has the actual error. */
    VIR_FORCE_CLOSE(data->sock);
    if (!virLastErrorIsSystemErrno(EPIPE))
        virCopyLastError(&data->err);
3232
    virResetLastError();
3233
    VIR_FREE(buffer);
3234 3235 3236 3237
}


static qemuMigrationIOThreadPtr
3238 3239
qemuMigrationSrcStartTunnel(virStreamPtr st,
                            int sock)
3240
{
3241 3242
    qemuMigrationIOThreadPtr io = NULL;
    int wakeupFD[2] = { -1, -1 };
3243

3244 3245 3246 3247
    if (pipe2(wakeupFD, O_CLOEXEC) < 0) {
        virReportSystemError(errno, "%s",
                             _("Unable to make pipe"));
        goto error;
3248 3249
    }

3250
    if (VIR_ALLOC(io) < 0)
3251
        goto error;
3252

3253 3254
    io->st = st;
    io->sock = sock;
3255 3256
    io->wakeupRecvFD = wakeupFD[0];
    io->wakeupSendFD = wakeupFD[1];
3257 3258

    if (virThreadCreate(&io->thread, true,
3259
                        qemuMigrationSrcIOFunc,
3260 3261 3262
                        io) < 0) {
        virReportSystemError(errno, "%s",
                             _("Unable to create migration thread"));
3263
        goto error;
3264 3265 3266
    }

    return io;
3267

3268
 error:
3269 3270 3271 3272
    VIR_FORCE_CLOSE(wakeupFD[0]);
    VIR_FORCE_CLOSE(wakeupFD[1]);
    VIR_FREE(io);
    return NULL;
3273 3274 3275
}

static int
3276
qemuMigrationSrcStopTunnel(qemuMigrationIOThreadPtr io, bool error)
3277 3278
{
    int rv = -1;
3279 3280 3281 3282 3283 3284 3285 3286 3287
    char stop = error ? 1 : 0;

    /* make sure the thread finishes its job and is joinable */
    if (safewrite(io->wakeupSendFD, &stop, 1) != 1) {
        virReportSystemError(errno, "%s",
                             _("failed to wakeup migration tunnel"));
        goto cleanup;
    }

3288 3289 3290 3291
    virThreadJoin(&io->thread);

    /* Forward error from the IO thread, to this thread */
    if (io->err.code != VIR_ERR_OK) {
3292 3293 3294 3295
        if (error)
            rv = 0;
        else
            virSetError(&io->err);
3296 3297 3298 3299 3300 3301
        virResetError(&io->err);
        goto cleanup;
    }

    rv = 0;

3302
 cleanup:
3303 3304
    VIR_FORCE_CLOSE(io->wakeupSendFD);
    VIR_FORCE_CLOSE(io->wakeupRecvFD);
3305 3306
    VIR_FREE(io);
    return rv;
3307 3308
}

3309
static int
3310 3311 3312
qemuMigrationSrcConnect(virQEMUDriverPtr driver,
                        virDomainObjPtr vm,
                        qemuMigrationSpecPtr spec)
3313 3314 3315 3316 3317 3318 3319
{
    virNetSocketPtr sock;
    const char *host;
    char *port = NULL;
    int ret = -1;

    host = spec->dest.host.name;
3320
    if (virAsprintf(&port, "%d", spec->dest.host.port) < 0)
3321 3322 3323 3324 3325
        return -1;

    spec->destType = MIGRATION_DEST_FD;
    spec->dest.fd.qemu = -1;

3326
    if (qemuSecuritySetSocketLabel(driver->securityManager, vm->def) < 0)
3327
        goto cleanup;
3328 3329 3330
    if (virNetSocketNewConnectTCP(host, port,
                                  AF_UNSPEC,
                                  &sock) == 0) {
3331
        spec->dest.fd.qemu = virNetSocketDupFD(sock, true);
3332
        virObjectUnref(sock);
3333
    }
3334
    if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0 ||
3335 3336 3337
        spec->dest.fd.qemu == -1)
        goto cleanup;

3338 3339 3340 3341 3342 3343 3344
    /* Migration expects a blocking FD */
    if (virSetBlocking(spec->dest.fd.qemu, true) < 0) {
        virReportSystemError(errno, _("Unable to set FD %d blocking"),
                             spec->dest.fd.qemu);
        goto cleanup;
    }

3345 3346
    ret = 0;

3347
 cleanup:
3348 3349 3350 3351 3352 3353
    VIR_FREE(port);
    if (ret < 0)
        VIR_FORCE_CLOSE(spec->dest.fd.qemu);
    return ret;
}

3354 3355

static int
3356 3357 3358 3359
qemuMigrationSrcContinue(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         qemuMonitorMigrationStatus status,
                         qemuDomainAsyncJob asyncJob)
3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    ret = qemuMonitorMigrateContinue(priv->mon, status);

    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        ret = -1;

    return ret;
}


3376
static int
3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390
qemuMigrationSrcRun(virQEMUDriverPtr driver,
                    virDomainObjPtr vm,
                    const char *persist_xml,
                    const char *cookiein,
                    int cookieinlen,
                    char **cookieout,
                    int *cookieoutlen,
                    unsigned long flags,
                    unsigned long resource,
                    qemuMigrationSpecPtr spec,
                    virConnectPtr dconn,
                    const char *graphicsuri,
                    size_t nmigrate_disks,
                    const char **migrate_disks,
3391
                    qemuMigrationParamsPtr migParams)
3392
{
3393
    int ret = -1;
3394 3395
    unsigned int migrate_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
    qemuDomainObjPrivatePtr priv = vm->privateData;
3396
    qemuMigrationCookiePtr mig = NULL;
3397
    char *tlsAlias = NULL;
3398
    qemuMigrationIOThreadPtr iothread = NULL;
3399
    int fd = -1;
3400
    unsigned long migrate_speed = resource ? resource : priv->migMaxBandwidth;
3401
    virErrorPtr orig_err = NULL;
3402
    unsigned int cookieFlags = 0;
3403
    bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);
3404
    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
3405
    bool cancel = false;
3406
    unsigned int waitFlags;
3407
    virDomainDefPtr persistDef = NULL;
3408
    char *timestamp;
3409
    int rc;
3410 3411

    VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
3412
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, resource=%lu, "
3413 3414
              "spec=%p (dest=%d, fwd=%d), dconn=%p, graphicsuri=%s, "
              "nmigrate_disks=%zu, migrate_disks=%p",
3415 3416
              driver, vm, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, resource,
3417
              spec, spec->destType, spec->fwdType, dconn,
3418
              NULLSTR(graphicsuri), nmigrate_disks, migrate_disks);
3419

3420 3421 3422 3423 3424 3425 3426 3427 3428 3429
    if (flags & VIR_MIGRATE_NON_SHARED_DISK) {
        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (flags & VIR_MIGRATE_NON_SHARED_INC) {
        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

3430 3431
    if (virLockManagerPluginUsesState(driver->lockManager) &&
        !cookieout) {
3432 3433 3434 3435
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Migration with lock driver %s requires"
                         " cookie support"),
                       virLockManagerPluginGetName(driver->lockManager));
3436 3437 3438
        return -1;
    }

3439 3440 3441
    if (events)
        priv->signalIOError = abort_on_error;

3442 3443
    if (flags & VIR_MIGRATE_PERSIST_DEST) {
        if (persist_xml) {
3444 3445 3446
            if (!(persistDef = qemuMigrationAnyPrepareDef(driver,
                                                          priv->qemuCaps,
                                                          persist_xml,
3447
                                                          NULL, NULL)))
3448
                goto error;
3449 3450
        } else {
            virDomainDefPtr def = vm->newDef ? vm->newDef : vm->def;
3451
            if (!(persistDef = qemuDomainDefCopy(driver, priv->qemuCaps, def,
3452 3453
                                                 VIR_DOMAIN_XML_SECURE |
                                                 VIR_DOMAIN_XML_MIGRATABLE)))
3454
                goto error;
3455 3456 3457
        }
    }

3458 3459
    mig = qemuMigrationEatCookie(driver, vm->def, priv->origname, priv,
                                 cookiein, cookieinlen,
3460 3461 3462
                                 cookieFlags |
                                 QEMU_MIGRATION_COOKIE_GRAPHICS |
                                 QEMU_MIGRATION_COOKIE_CAPS);
3463
    if (!mig)
3464
        goto error;
3465

3466
    if (qemuMigrationSrcGraphicsRelocate(driver, vm, mig, graphicsuri) < 0)
3467 3468
        VIR_WARN("unable to provide data for graphics client relocation");

3469
    if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
3470
                                 migParams, mig->caps->automatic) < 0)
3471 3472
        goto error;

3473
    if (flags & VIR_MIGRATE_TLS) {
3474
        const char *hostname = NULL;
3475 3476 3477 3478

        /* We need to add tls-hostname whenever QEMU itself does not
         * connect directly to the destination. */
        if (spec->destType == MIGRATION_DEST_CONNECT_HOST ||
3479 3480 3481
            spec->destType == MIGRATION_DEST_FD)
            hostname = spec->dest.host.name;

3482
        if (qemuMigrationParamsEnableTLS(driver, vm, false,
3483
                                         QEMU_ASYNC_JOB_MIGRATION_OUT,
3484
                                         &tlsAlias, hostname,
3485 3486
                                         migParams) < 0)
            goto error;
3487
    } else {
3488
        if (qemuMigrationParamsDisableTLS(vm, migParams) < 0)
3489
            goto error;
3490 3491
    }

3492 3493 3494 3495
    if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 migParams) < 0)
        goto error;

3496 3497 3498
    if (migrate_flags & (QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
                         QEMU_MONITOR_MIGRATE_NON_SHARED_INC)) {
        if (mig->nbd) {
3499 3500 3501 3502
            /* Currently libvirt does not support setting up of the NBD
             * non-shared storage migration with TLS. As we need to honour the
             * VIR_MIGRATE_TLS flag, we need to reject such migration until
             * we implement TLS for NBD. */
3503 3504
            if (flags & VIR_MIGRATE_TLS &&
                !virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_DEL)) {
3505 3506 3507 3508 3509
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("NBD migration with TLS is not supported"));
                goto error;
            }

3510
            /* This will update migrate_flags on success */
3511 3512 3513 3514 3515 3516
            if (qemuMigrationSrcNBDStorageCopy(driver, vm, mig,
                                               spec->dest.host.name,
                                               migrate_speed,
                                               &migrate_flags,
                                               nmigrate_disks,
                                               migrate_disks,
3517
                                               dconn, tlsAlias, flags) < 0) {
3518
                goto error;
3519 3520 3521 3522 3523 3524 3525
            }
        } else {
            /* Destination doesn't support NBD server.
             * Fall back to previous implementation. */
            VIR_DEBUG("Destination doesn't support NBD server "
                      "Falling back to previous implementation.");
        }
3526 3527
    }

3528
    /* Before EnterMonitor, since already qemuProcessStopCPUs does that */
3529 3530
    if (!(flags & VIR_MIGRATE_LIVE) &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
3531 3532
        if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION,
                                QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
3533
            goto error;
3534 3535
    }

3536 3537
    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
3538
        goto error;
3539

3540
    if (priv->job.abortJob) {
3541 3542
        /* explicitly do this *after* we entered the monitor,
         * as this is a critical section so we are guaranteed
3543
         * priv->job.abortJob will not change */
3544
        priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
3545 3546 3547
        virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                       qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                       _("canceled by client"));
3548
        goto exit_monitor;
3549 3550
    }

3551 3552
    if (qemuMonitorSetMigrationSpeed(priv->mon, migrate_speed) < 0)
        goto exit_monitor;
3553

3554 3555
    /* connect to the destination qemu if needed */
    if (spec->destType == MIGRATION_DEST_CONNECT_HOST &&
3556
        qemuMigrationSrcConnect(driver, vm, spec) < 0) {
3557
        goto exit_monitor;
3558
    }
3559

3560 3561 3562 3563 3564 3565
    /* log start of migration */
    if ((timestamp = virTimeStringNow()) != NULL) {
        qemuDomainLogAppendMessage(driver, vm, "%s: initiating migration\n", timestamp);
        VIR_FREE(timestamp);
    }

3566
    rc = -1;
3567 3568
    switch (spec->destType) {
    case MIGRATION_DEST_HOST:
M
Michael R. Hines 已提交
3569 3570
        if (STREQ(spec->dest.host.protocol, "rdma") &&
            virProcessSetMaxMemLock(vm->pid, vm->def->mem.hard_limit << 10) < 0) {
3571
            goto exit_monitor;
M
Michael R. Hines 已提交
3572
        }
3573 3574 3575 3576
        rc = qemuMonitorMigrateToHost(priv->mon, migrate_flags,
                                      spec->dest.host.protocol,
                                      spec->dest.host.name,
                                      spec->dest.host.port);
3577 3578
        break;

3579 3580 3581 3582
    case MIGRATION_DEST_CONNECT_HOST:
        /* handled above and transformed into MIGRATION_DEST_FD */
        break;

3583
    case MIGRATION_DEST_FD:
3584
        if (spec->fwdType != MIGRATION_FWD_DIRECT) {
3585
            fd = spec->dest.fd.local;
3586 3587
            spec->dest.fd.local = -1;
        }
3588 3589
        rc = qemuMonitorMigrateToFd(priv->mon, migrate_flags,
                                    spec->dest.fd.qemu);
3590 3591
        VIR_FORCE_CLOSE(spec->dest.fd.qemu);
        break;
3592
    }
3593 3594

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0)
3595
        goto error;
3596 3597 3598

    /* From this point onwards we *must* call cancel to abort the
     * migration on source if anything goes wrong */
3599
    cancel = true;
3600

3601
    if (spec->fwdType != MIGRATION_FWD_DIRECT) {
3602
        if (!(iothread = qemuMigrationSrcStartTunnel(spec->fwd.stream, fd)))
3603
            goto error;
3604 3605 3606 3607 3608
        /* If we've created a tunnel, then the 'fd' will be closed in the
         * qemuMigrationIOFunc as data->sock.
         */
        fd = -1;
    }
3609

3610
    waitFlags = QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;
3611 3612 3613 3614 3615 3616 3617
    if (abort_on_error)
        waitFlags |= QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR;
    if (mig->nbd)
        waitFlags |= QEMU_MIGRATION_COMPLETED_CHECK_STORAGE;
    if (flags & VIR_MIGRATE_POSTCOPY)
        waitFlags |= QEMU_MIGRATION_COMPLETED_POSTCOPY;

3618 3619 3620
    rc = qemuMigrationSrcWaitForCompletion(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_OUT,
                                           dconn, waitFlags);
3621
    if (rc == -2) {
3622
        goto error;
3623 3624 3625 3626 3627
    } else if (rc == -1) {
        /* QEMU reported failed migration, nothing to cancel anymore */
        cancel = false;
        goto error;
    }
3628

3629
    /* When migration completed, QEMU will have paused the CPUs for us.
J
Ján Tomko 已提交
3630
     * Wait for the STOP event to be processed to release the lock state.
3631
     */
J
Ján Tomko 已提交
3632 3633 3634 3635 3636 3637
    while (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        priv->signalStop = true;
        rc = virDomainObjWait(vm);
        priv->signalStop = false;
        if (rc < 0)
            goto error;
3638
    }
3639

J
Ján Tomko 已提交
3640
    if (mig->nbd &&
3641 3642 3643
        qemuMigrationSrcNBDCopyCancel(driver, vm, true,
                                      QEMU_ASYNC_JOB_MIGRATION_OUT,
                                      dconn) < 0)
3644
        goto error;
3645

3646 3647 3648 3649 3650
    /* When migration was paused before serializing device state we need to
     * resume it now once we finished all block jobs and wait for the real
     * end of the migration.
     */
    if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) {
3651 3652 3653
        if (qemuMigrationSrcContinue(driver, vm,
                                     QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
                                     QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
3654 3655 3656 3657
            goto error;

        waitFlags ^= QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;

3658 3659 3660
        rc = qemuMigrationSrcWaitForCompletion(driver, vm,
                                               QEMU_ASYNC_JOB_MIGRATION_OUT,
                                               dconn, waitFlags);
3661 3662 3663 3664 3665 3666 3667 3668 3669
        if (rc == -2) {
            goto error;
        } else if (rc == -1) {
            /* QEMU reported failed migration, nothing to cancel anymore */
            cancel = false;
            goto error;
        }
    }

3670 3671 3672 3673
    if (iothread) {
        qemuMigrationIOThreadPtr io;

        VIR_STEAL_PTR(io, iothread);
3674
        if (qemuMigrationSrcStopTunnel(io, false) < 0)
3675
            goto error;
3676 3677 3678
    }

    if (priv->job.completed) {
3679
        priv->job.completed->stopped = priv->job.current->stopped;
3680 3681 3682 3683 3684 3685 3686 3687 3688
        qemuDomainJobInfoUpdateTime(priv->job.completed);
        qemuDomainJobInfoUpdateDowntime(priv->job.completed);
        ignore_value(virTimeMillisNow(&priv->job.completed->sent));
    }

    cookieFlags |= QEMU_MIGRATION_COOKIE_NETWORK |
                   QEMU_MIGRATION_COOKIE_STATS;

    if (qemuMigrationCookieAddPersistent(mig, &persistDef) < 0 ||
3689 3690 3691
        qemuMigrationBakeCookie(mig, driver, vm,
                                QEMU_MIGRATION_SOURCE,
                                cookieout, cookieoutlen, cookieFlags) < 0) {
3692 3693
        VIR_WARN("Unable to encode migration cookie");
    }
3694

3695
    ret = 0;
3696

3697
 cleanup:
3698
    VIR_FREE(tlsAlias);
3699
    VIR_FORCE_CLOSE(fd);
3700
    virDomainDefFree(persistDef);
3701 3702
    qemuMigrationCookieFree(mig);

3703 3704 3705
    if (events)
        priv->signalIOError = false;

3706 3707 3708 3709 3710
    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

3711 3712
    return ret;

3713
 error:
3714 3715
    orig_err = virSaveLastError();

3716 3717 3718 3719 3720 3721 3722 3723
    if (virDomainObjIsActive(vm)) {
        if (cancel &&
            priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED &&
            qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
            qemuMonitorMigrateCancel(priv->mon);
            ignore_value(qemuDomainObjExitMonitor(driver, vm));
        }
3724

3725 3726
        /* cancel any outstanding NBD jobs */
        if (mig && mig->nbd)
3727 3728 3729
            qemuMigrationSrcNBDCopyCancel(driver, vm, false,
                                          QEMU_ASYNC_JOB_MIGRATION_OUT,
                                          dconn);
3730 3731 3732 3733

        if (priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED)
            priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
    }
3734 3735

    if (iothread)
3736
        qemuMigrationSrcStopTunnel(iothread, true);
3737 3738 3739

    goto cleanup;

3740 3741
 exit_monitor:
    ignore_value(qemuDomainObjExitMonitor(driver, vm));
3742
    goto error;
3743 3744
}

3745
/* Perform migration using QEMU's native migrate support,
3746 3747
 * not encrypted obviously
 */
3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762
static int
qemuMigrationSrcPerformNative(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              const char *persist_xml,
                              const char *uri,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              unsigned long flags,
                              unsigned long resource,
                              virConnectPtr dconn,
                              const char *graphicsuri,
                              size_t nmigrate_disks,
                              const char **migrate_disks,
3763
                              qemuMigrationParamsPtr migParams)
3764
{
3765
    qemuDomainObjPrivatePtr priv = vm->privateData;
M
Martin Kletzander 已提交
3766
    virURIPtr uribits = NULL;
3767
    int ret = -1;
3768 3769 3770
    qemuMigrationSpec spec;

    VIR_DEBUG("driver=%p, vm=%p, uri=%s, cookiein=%s, cookieinlen=%d, "
3771
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, resource=%lu, "
3772
              "graphicsuri=%s, nmigrate_disks=%zu migrate_disks=%p",
3773
              driver, vm, uri, NULLSTR(cookiein), cookieinlen,
3774
              cookieout, cookieoutlen, flags, resource,
3775
              NULLSTR(graphicsuri), nmigrate_disks, migrate_disks);
3776

3777
    if (!(uribits = qemuMigrationAnyParseURI(uri, NULL)))
3778 3779
        return -1;

3780 3781 3782 3783 3784 3785 3786
    if (uribits->scheme == NULL) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("missing scheme in migration URI: %s"),
                       uri);
        goto cleanup;
    }

M
Michael R. Hines 已提交
3787 3788 3789 3790 3791
    if (STREQ(uribits->scheme, "rdma")) {
        if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_RDMA)) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("outgoing RDMA migration is not supported "
                             "with this QEMU binary"));
3792
            goto cleanup;
M
Michael R. Hines 已提交
3793
        }
3794
        if (!virMemoryLimitIsSet(vm->def->mem.hard_limit)) {
M
Michael R. Hines 已提交
3795 3796 3797
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("cannot start RDMA migration with no memory hard "
                             "limit set"));
3798
            goto cleanup;
M
Michael R. Hines 已提交
3799 3800 3801
        }
    }

3802 3803 3804 3805
    /* RDMA and multi-fd migration requires QEMU to connect to the destination
     * itself.
     */
    if (STREQ(uribits->scheme, "rdma") || (flags & VIR_MIGRATE_PARALLEL))
3806
        spec.destType = MIGRATION_DEST_HOST;
3807 3808
    else
        spec.destType = MIGRATION_DEST_CONNECT_HOST;
3809
    spec.dest.host.protocol = uribits->scheme;
3810 3811 3812
    spec.dest.host.name = uribits->server;
    spec.dest.host.port = uribits->port;
    spec.fwdType = MIGRATION_FWD_DIRECT;
3813

3814 3815 3816
    ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen, cookieout,
                              cookieoutlen, flags, resource, &spec, dconn,
                              graphicsuri, nmigrate_disks, migrate_disks,
3817
                              migParams);
3818 3819 3820 3821

    if (spec.destType == MIGRATION_DEST_FD)
        VIR_FORCE_CLOSE(spec.dest.fd.qemu);

3822
 cleanup:
3823
    virURIFree(uribits);
3824 3825 3826 3827 3828

    return ret;
}


3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843
static int
qemuMigrationSrcPerformTunnel(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              virStreamPtr st,
                              const char *persist_xml,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              unsigned long flags,
                              unsigned long resource,
                              virConnectPtr dconn,
                              const char *graphicsuri,
                              size_t nmigrate_disks,
                              const char **migrate_disks,
3844
                              qemuMigrationParamsPtr migParams)
3845 3846 3847
{
    int ret = -1;
    qemuMigrationSpec spec;
3848
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
3849
    int fds[2] = { -1, -1 };
3850 3851

    VIR_DEBUG("driver=%p, vm=%p, st=%p, cookiein=%s, cookieinlen=%d, "
3852
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, resource=%lu, "
3853
              "graphicsuri=%s, nmigrate_disks=%zu, migrate_disks=%p",
3854
              driver, vm, st, NULLSTR(cookiein), cookieinlen,
3855
              cookieout, cookieoutlen, flags, resource,
3856
              NULLSTR(graphicsuri), nmigrate_disks, migrate_disks);
3857 3858 3859 3860

    spec.fwdType = MIGRATION_FWD_STREAM;
    spec.fwd.stream = st;

3861

3862 3863 3864
    spec.destType = MIGRATION_DEST_FD;
    spec.dest.fd.qemu = -1;
    spec.dest.fd.local = -1;
3865

3866 3867 3868 3869 3870
    if (pipe2(fds, O_CLOEXEC) == 0) {
        spec.dest.fd.qemu = fds[1];
        spec.dest.fd.local = fds[0];
    }
    if (spec.dest.fd.qemu == -1 ||
3871 3872
        qemuSecuritySetImageFDLabel(driver->securityManager, vm->def,
                                    spec.dest.fd.qemu) < 0) {
3873 3874 3875
        virReportSystemError(errno, "%s",
                             _("cannot create pipe for tunnelled migration"));
        goto cleanup;
3876 3877
    }

3878 3879 3880
    ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen,
                              cookieout, cookieoutlen, flags, resource, &spec,
                              dconn, graphicsuri, nmigrate_disks, migrate_disks,
3881
                              migParams);
3882

3883
 cleanup:
3884 3885
    VIR_FORCE_CLOSE(spec.dest.fd.qemu);
    VIR_FORCE_CLOSE(spec.dest.fd.local);
3886

3887
    virObjectUnref(cfg);
3888 3889 3890 3891
    return ret;
}


3892 3893 3894 3895
/* This is essentially a re-impl of virDomainMigrateVersion2
 * from libvirt.c, but running in source libvirtd context,
 * instead of client app context & also adding in tunnel
 * handling */
3896 3897 3898 3899 3900 3901 3902 3903
static int
qemuMigrationSrcPerformPeer2Peer2(virQEMUDriverPtr driver,
                                  virConnectPtr sconn,
                                  virConnectPtr dconn,
                                  virDomainObjPtr vm,
                                  const char *dconnuri,
                                  unsigned long flags,
                                  const char *dname,
3904 3905
                                  unsigned long resource,
                                  qemuMigrationParamsPtr migParams)
3906 3907 3908
{
    virDomainPtr ddomain = NULL;
    char *uri_out = NULL;
3909
    char *cookie = NULL;
3910 3911 3912
    char *dom_xml = NULL;
    int cookielen = 0, ret;
    virErrorPtr orig_err = NULL;
3913
    bool cancelled;
3914
    virStreamPtr st = NULL;
3915
    unsigned long destflags;
3916

3917
    VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, vm=%p, dconnuri=%s, "
3918
              "flags=0x%lx, dname=%s, resource=%lu",
3919 3920
              driver, sconn, dconn, vm, NULLSTR(dconnuri),
              flags, NULLSTR(dname), resource);
3921

3922 3923 3924 3925 3926
    /* In version 2 of the protocol, the prepare step is slightly
     * different.  We fetch the domain XML of the source domain
     * and pass it to Prepare2.
     */
    if (!(dom_xml = qemuDomainFormatXML(driver, vm,
3927 3928
                                        QEMU_DOMAIN_FORMAT_LIVE_FLAGS |
                                        VIR_DOMAIN_XML_MIGRATABLE)))
3929 3930 3931 3932 3933
        return -1;

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
        flags |= VIR_MIGRATE_PAUSED;

3934 3935
    destflags = flags & ~(VIR_MIGRATE_ABORT_ON_ERROR |
                          VIR_MIGRATE_AUTO_CONVERGE);
3936

3937 3938 3939 3940 3941 3942 3943 3944 3945 3946
    VIR_DEBUG("Prepare2 %p", dconn);
    if (flags & VIR_MIGRATE_TUNNELLED) {
        /*
         * Tunnelled Migrate Version 2 does not support cookies
         * due to missing parameters in the prepareTunnel() API.
         */

        if (!(st = virStreamNew(dconn, 0)))
            goto cleanup;

3947
        qemuDomainObjEnterRemote(vm);
3948
        ret = dconn->driver->domainMigratePrepareTunnel
3949
            (dconn, st, destflags, dname, resource, dom_xml);
3950 3951
        if (qemuDomainObjExitRemote(vm, true) < 0)
            goto cleanup;
3952
    } else {
3953
        qemuDomainObjEnterRemote(vm);
3954 3955
        ret = dconn->driver->domainMigratePrepare2
            (dconn, &cookie, &cookielen, NULL, &uri_out,
3956
             destflags, dname, resource, dom_xml);
3957 3958
        if (qemuDomainObjExitRemote(vm, true) < 0)
            goto cleanup;
3959 3960 3961
    }
    VIR_FREE(dom_xml);
    if (ret == -1)
3962 3963
        goto cleanup;

3964 3965
    if (!(flags & VIR_MIGRATE_TUNNELLED) &&
        (uri_out == NULL)) {
3966 3967
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domainMigratePrepare2 did not set uri"));
3968
        cancelled = true;
3969
        orig_err = virSaveLastError();
3970
        goto finish;
3971 3972
    }

3973 3974 3975 3976
    /* Perform the migration.  The driver isn't supposed to return
     * until the migration is complete.
     */
    VIR_DEBUG("Perform %p", sconn);
3977
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
3978
    if (flags & VIR_MIGRATE_TUNNELLED)
3979 3980 3981
        ret = qemuMigrationSrcPerformTunnel(driver, vm, st, NULL,
                                            NULL, 0, NULL, NULL,
                                            flags, resource, dconn,
3982
                                            NULL, 0, NULL, migParams);
3983
    else
3984 3985 3986 3987
        ret = qemuMigrationSrcPerformNative(driver, vm, NULL, uri_out,
                                            cookie, cookielen,
                                            NULL, NULL, /* No out cookie with v2 migration */
                                            flags, resource, dconn, NULL, 0, NULL,
3988
                                            migParams);
3989 3990 3991 3992

    /* Perform failed. Make sure Finish doesn't overwrite the error */
    if (ret < 0)
        orig_err = virSaveLastError();
3993

3994 3995 3996
    /* If Perform returns < 0, then we need to cancel the VM
     * startup on the destination
     */
3997
    cancelled = ret < 0;
3998

3999
 finish:
4000 4001 4002 4003
    /* In version 2 of the migration protocol, we pass the
     * status code from the sender to the destination host,
     * so it can do any cleanup if the migration failed.
     */
4004
    dname = dname ? dname : vm->def->name;
4005
    VIR_DEBUG("Finish2 %p ret=%d", dconn, ret);
4006
    qemuDomainObjEnterRemote(vm);
4007
    ddomain = dconn->driver->domainMigrateFinish2
4008
        (dconn, dname, cookie, cookielen,
4009
         uri_out ? uri_out : dconnuri, destflags, cancelled);
4010 4011
    /* The domain is already gone at this point */
    ignore_value(qemuDomainObjExitRemote(vm, false));
4012 4013
    if (cancelled && ddomain)
        VIR_ERROR(_("finish step ignored that migration was cancelled"));
4014

4015
 cleanup:
4016
    if (ddomain) {
4017
        virObjectUnref(ddomain);
4018 4019 4020 4021
        ret = 0;
    } else {
        ret = -1;
    }
4022

4023
    virObjectUnref(st);
4024 4025 4026 4027 4028 4029

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    VIR_FREE(uri_out);
4030
    VIR_FREE(cookie);
4031 4032

    return ret;
4033 4034 4035
}


4036 4037 4038 4039
/* This is essentially a re-impl of virDomainMigrateVersion3
 * from libvirt.c, but running in source libvirtd context,
 * instead of client app context & also adding in tunnel
 * handling */
4040
static int
4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054
qemuMigrationSrcPerformPeer2Peer3(virQEMUDriverPtr driver,
                                  virConnectPtr sconn,
                                  virConnectPtr dconn,
                                  const char *dconnuri,
                                  virDomainObjPtr vm,
                                  const char *xmlin,
                                  const char *persist_xml,
                                  const char *dname,
                                  const char *uri,
                                  const char *graphicsuri,
                                  const char *listenAddress,
                                  size_t nmigrate_disks,
                                  const char **migrate_disks,
                                  int nbdPort,
4055
                                  qemuMigrationParamsPtr migParams,
4056 4057 4058
                                  unsigned long long bandwidth,
                                  bool useParams,
                                  unsigned long flags)
4059 4060 4061 4062 4063 4064 4065 4066 4067 4068
{
    virDomainPtr ddomain = NULL;
    char *uri_out = NULL;
    char *cookiein = NULL;
    char *cookieout = NULL;
    char *dom_xml = NULL;
    int cookieinlen = 0;
    int cookieoutlen = 0;
    int ret = -1;
    virErrorPtr orig_err = NULL;
4069
    bool cancelled = true;
4070
    virStreamPtr st = NULL;
4071
    unsigned long destflags;
4072 4073 4074
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int maxparams = 0;
4075
    size_t i;
4076
    bool offline = !!(flags & VIR_MIGRATE_OFFLINE);
4077 4078

    VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, dconnuri=%s, vm=%p, xmlin=%s, "
4079
              "dname=%s, uri=%s, graphicsuri=%s, listenAddress=%s, "
4080
              "nmigrate_disks=%zu, migrate_disks=%p, nbdPort=%d, "
4081
              "bandwidth=%llu, useParams=%d, flags=0x%lx",
4082
              driver, sconn, dconn, NULLSTR(dconnuri), vm, NULLSTR(xmlin),
4083
              NULLSTR(dname), NULLSTR(uri), NULLSTR(graphicsuri),
4084
              NULLSTR(listenAddress), nmigrate_disks, migrate_disks, nbdPort,
4085
              bandwidth, useParams, flags);
4086

4087 4088 4089 4090 4091
    /* Unlike the virDomainMigrateVersion3 counterpart, we don't need
     * to worry about auto-setting the VIR_MIGRATE_CHANGE_PROTECTION
     * bit here, because we are already running inside the context of
     * a single job.  */

4092 4093 4094
    dom_xml = qemuMigrationSrcBeginPhase(driver, vm, xmlin, dname,
                                         &cookieout, &cookieoutlen,
                                         nmigrate_disks, migrate_disks, flags);
4095 4096 4097
    if (!dom_xml)
        goto cleanup;

4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117
    if (useParams) {
        if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_DEST_XML, dom_xml) < 0)
            goto cleanup;

        if (dname &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_DEST_NAME, dname) < 0)
            goto cleanup;

        if (uri &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_URI, uri) < 0)
            goto cleanup;

        if (bandwidth &&
            virTypedParamsAddULLong(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_BANDWIDTH,
                                    bandwidth) < 0)
            goto cleanup;
4118 4119 4120 4121 4122 4123

        if (graphicsuri &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_GRAPHICS_URI,
                                    graphicsuri) < 0)
            goto cleanup;
4124 4125 4126 4127 4128
        if (listenAddress &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_LISTEN_ADDRESS,
                                    listenAddress) < 0)
            goto cleanup;
4129 4130 4131 4132 4133
        for (i = 0; i < nmigrate_disks; i++)
            if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                        VIR_MIGRATE_PARAM_MIGRATE_DISKS,
                                        migrate_disks[i]) < 0)
                goto cleanup;
4134 4135 4136 4137 4138
        if (nbdPort &&
            virTypedParamsAddInt(&params, &nparams, &maxparams,
                                 VIR_MIGRATE_PARAM_DISKS_PORT,
                                 nbdPort) < 0)
            goto cleanup;
4139

4140 4141
        if (qemuMigrationParamsDump(migParams, &params, &nparams,
                                    &maxparams, &flags) < 0)
4142
            goto cleanup;
4143 4144
    }

4145 4146 4147
    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
        flags |= VIR_MIGRATE_PAUSED;

4148 4149
    destflags = flags & ~(VIR_MIGRATE_ABORT_ON_ERROR |
                          VIR_MIGRATE_AUTO_CONVERGE);
4150

4151
    VIR_DEBUG("Prepare3 %p", dconn);
4152
    VIR_STEAL_PTR(cookiein, cookieout);
4153 4154 4155 4156 4157 4158
    cookieinlen = cookieoutlen;
    cookieoutlen = 0;
    if (flags & VIR_MIGRATE_TUNNELLED) {
        if (!(st = virStreamNew(dconn, 0)))
            goto cleanup;

4159
        qemuDomainObjEnterRemote(vm);
4160 4161 4162 4163 4164 4165 4166 4167 4168
        if (useParams) {
            ret = dconn->driver->domainMigratePrepareTunnel3Params
                (dconn, st, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, destflags);
        } else {
            ret = dconn->driver->domainMigratePrepareTunnel3
                (dconn, st, cookiein, cookieinlen, &cookieout, &cookieoutlen,
                 destflags, dname, bandwidth, dom_xml);
        }
4169 4170
        if (qemuDomainObjExitRemote(vm, !offline) < 0)
            goto cleanup;
4171
    } else {
4172
        qemuDomainObjEnterRemote(vm);
4173 4174 4175 4176 4177 4178 4179 4180 4181
        if (useParams) {
            ret = dconn->driver->domainMigratePrepare3Params
                (dconn, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, &uri_out, destflags);
        } else {
            ret = dconn->driver->domainMigratePrepare3
                (dconn, cookiein, cookieinlen, &cookieout, &cookieoutlen,
                 uri, &uri_out, destflags, dname, bandwidth, dom_xml);
        }
4182 4183
        if (qemuDomainObjExitRemote(vm, !offline) < 0)
            goto cleanup;
4184 4185 4186 4187 4188
    }
    VIR_FREE(dom_xml);
    if (ret == -1)
        goto cleanup;

4189
    if (offline) {
L
liguang 已提交
4190 4191 4192
        VIR_DEBUG("Offline migration, skipping Perform phase");
        VIR_FREE(cookieout);
        cookieoutlen = 0;
4193
        cancelled = false;
L
liguang 已提交
4194 4195 4196
        goto finish;
    }

4197 4198 4199 4200
    if (uri_out) {
        uri = uri_out;
        if (useParams &&
            virTypedParamsReplaceString(&params, &nparams,
4201 4202
                                        VIR_MIGRATE_PARAM_URI, uri_out) < 0) {
            orig_err = virSaveLastError();
4203
            goto finish;
4204
        }
4205
    } else if (!uri && !(flags & VIR_MIGRATE_TUNNELLED)) {
4206 4207
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domainMigratePrepare3 did not set uri"));
4208
        orig_err = virSaveLastError();
4209 4210 4211 4212 4213 4214 4215 4216
        goto finish;
    }

    /* Perform the migration.  The driver isn't supposed to return
     * until the migration is complete. The src VM should remain
     * running, but in paused state until the destination can
     * confirm migration completion.
     */
4217
    VIR_DEBUG("Perform3 %p uri=%s", sconn, NULLSTR(uri));
4218
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
4219
    VIR_FREE(cookiein);
4220
    VIR_STEAL_PTR(cookiein, cookieout);
4221 4222
    cookieinlen = cookieoutlen;
    cookieoutlen = 0;
4223
    if (flags & VIR_MIGRATE_TUNNELLED) {
4224 4225 4226 4227
        ret = qemuMigrationSrcPerformTunnel(driver, vm, st, persist_xml,
                                            cookiein, cookieinlen,
                                            &cookieout, &cookieoutlen,
                                            flags, bandwidth, dconn, graphicsuri,
4228
                                            nmigrate_disks, migrate_disks,
4229
                                            migParams);
4230
    } else {
4231 4232 4233 4234
        ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri,
                                            cookiein, cookieinlen,
                                            &cookieout, &cookieoutlen,
                                            flags, bandwidth, dconn, graphicsuri,
4235
                                            nmigrate_disks, migrate_disks,
4236
                                            migParams);
4237
    }
4238 4239

    /* Perform failed. Make sure Finish doesn't overwrite the error */
4240
    if (ret < 0) {
4241
        orig_err = virSaveLastError();
4242 4243 4244 4245
    } else {
        qemuMigrationJobSetPhase(driver, vm,
                                 QEMU_MIGRATION_PHASE_PERFORM3_DONE);
    }
4246 4247 4248 4249

    /* If Perform returns < 0, then we need to cancel the VM
     * startup on the destination
     */
4250
    cancelled = ret < 0;
4251

4252
 finish:
4253 4254 4255 4256 4257 4258 4259 4260
    /*
     * The status code from the source is passed to the destination.
     * The dest can cleanup in the source indicated it failed to
     * send all migration data. Returns NULL for ddomain if
     * the dest was unable to complete migration.
     */
    VIR_DEBUG("Finish3 %p ret=%d", dconn, ret);
    VIR_FREE(cookiein);
4261
    VIR_STEAL_PTR(cookiein, cookieout);
4262 4263
    cookieinlen = cookieoutlen;
    cookieoutlen = 0;
4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276

    if (useParams) {
        if (virTypedParamsGetString(params, nparams,
                                    VIR_MIGRATE_PARAM_DEST_NAME, NULL) <= 0 &&
            virTypedParamsReplaceString(&params, &nparams,
                                        VIR_MIGRATE_PARAM_DEST_NAME,
                                        vm->def->name) < 0) {
            ddomain = NULL;
        } else {
            qemuDomainObjEnterRemote(vm);
            ddomain = dconn->driver->domainMigrateFinish3Params
                (dconn, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, destflags, cancelled);
4277 4278
            if (qemuDomainObjExitRemote(vm, !offline) < 0)
                goto cleanup;
4279 4280 4281 4282 4283 4284 4285
        }
    } else {
        dname = dname ? dname : vm->def->name;
        qemuDomainObjEnterRemote(vm);
        ddomain = dconn->driver->domainMigrateFinish3
            (dconn, dname, cookiein, cookieinlen, &cookieout, &cookieoutlen,
             dconnuri, uri, destflags, cancelled);
4286 4287
        if (qemuDomainObjExitRemote(vm, !offline) < 0)
            goto cleanup;
4288
    }
4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308

    if (cancelled) {
        if (ddomain) {
            VIR_ERROR(_("finish step ignored that migration was cancelled"));
        } else {
            /* If Finish reported a useful error, use it instead of the
             * original "migration unexpectedly failed" error.
             *
             * This is ugly but we can't do better with the APIs we have. We
             * only replace the error if Finish was called with cancelled == 1
             * and reported a real error (old libvirt would report an error
             * from RPC instead of MIGRATE_FINISH_OK), which only happens when
             * the domain died on destination. To further reduce a possibility
             * of false positives we also check that Perform returned
             * VIR_ERR_OPERATION_FAILED.
             */
            if (orig_err &&
                orig_err->domain == VIR_FROM_QEMU &&
                orig_err->code == VIR_ERR_OPERATION_FAILED) {
                virErrorPtr err = virGetLastError();
4309 4310
                if (err &&
                    err->domain == VIR_FROM_QEMU &&
4311 4312 4313 4314 4315 4316 4317
                    err->code != VIR_ERR_MIGRATE_FINISH_OK) {
                    virFreeError(orig_err);
                    orig_err = NULL;
                }
            }
        }
    }
4318

4319 4320 4321 4322 4323 4324 4325
    /* If ddomain is NULL, then we were unable to start
     * the guest on the target, and must restart on the
     * source. There is a small chance that the ddomain
     * is NULL due to an RPC failure, in which case
     * ddomain could in fact be running on the dest.
     * The lock manager plugins should take care of
     * safety in this scenario.
4326
     */
4327
    cancelled = ddomain == NULL;
4328

4329 4330 4331 4332 4333 4334
    /* If finish3 set an error, and we don't have an earlier
     * one we need to preserve it in case confirm3 overwrites
     */
    if (!orig_err)
        orig_err = virSaveLastError();

4335 4336 4337 4338
    /*
     * If cancelled, then src VM will be restarted, else
     * it will be killed
     */
4339
    VIR_DEBUG("Confirm3 %p cancelled=%d vm=%p", sconn, cancelled, vm);
4340
    VIR_FREE(cookiein);
4341
    VIR_STEAL_PTR(cookiein, cookieout);
4342 4343
    cookieinlen = cookieoutlen;
    cookieoutlen = 0;
4344 4345 4346
    ret = qemuMigrationSrcConfirmPhase(driver, vm,
                                       cookiein, cookieinlen,
                                       flags, cancelled);
4347 4348 4349 4350
    /* If Confirm3 returns -1, there's nothing more we can
     * do, but fortunately worst case is that there is a
     * domain left in 'paused' state on source.
     */
4351 4352 4353
    if (ret < 0)
        VIR_WARN("Guest %s probably left in 'paused' state on source",
                 vm->def->name);
4354 4355 4356

 cleanup:
    if (ddomain) {
4357
        virObjectUnref(ddomain);
4358 4359 4360 4361 4362
        ret = 0;
    } else {
        ret = -1;
    }

4363
    virObjectUnref(st);
4364 4365 4366 4367 4368 4369 4370 4371

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    VIR_FREE(uri_out);
    VIR_FREE(cookiein);
    VIR_FREE(cookieout);
4372
    virTypedParamsFree(params, nparams);
4373 4374 4375 4376
    return ret;
}


4377
static void
4378 4379 4380
qemuMigrationSrcConnectionClosed(virConnectPtr conn,
                                 int reason,
                                 void *opaque)
4381 4382 4383 4384 4385 4386 4387 4388
{
    virDomainObjPtr vm = opaque;

    VIR_DEBUG("conn=%p, reason=%d, vm=%s", conn, reason, vm->def->name);
    virDomainObjBroadcast(vm);
}


4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400
static int virConnectCredType[] = {
    VIR_CRED_AUTHNAME,
    VIR_CRED_PASSPHRASE,
};


static virConnectAuth virConnectAuthConfig = {
    .credtype = virConnectCredType,
    .ncredtype = ARRAY_CARDINALITY(virConnectCredType),
};


4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413
static int
qemuMigrationSrcPerformPeer2Peer(virQEMUDriverPtr driver,
                                 virConnectPtr sconn,
                                 virDomainObjPtr vm,
                                 const char *xmlin,
                                 const char *persist_xml,
                                 const char *dconnuri,
                                 const char *uri,
                                 const char *graphicsuri,
                                 const char *listenAddress,
                                 size_t nmigrate_disks,
                                 const char **migrate_disks,
                                 int nbdPort,
                                 qemuMigrationParamsPtr migParams,
                                 unsigned long flags,
                                 const char *dname,
                                 unsigned long resource,
                                 bool *v3proto)
{
    int ret = -1;
    virConnectPtr dconn = NULL;
    bool p2p;
    virErrorPtr orig_err = NULL;
    bool offline = !!(flags & VIR_MIGRATE_OFFLINE);
    bool dstOffline = false;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    bool useParams;

    VIR_DEBUG("driver=%p, sconn=%p, vm=%p, xmlin=%s, dconnuri=%s, uri=%s, "
              "graphicsuri=%s, listenAddress=%s, nmigrate_disks=%zu, "
              "migrate_disks=%p, nbdPort=%d, flags=0x%lx, dname=%s, "
              "resource=%lu",
              driver, sconn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
              NULLSTR(uri), NULLSTR(graphicsuri), NULLSTR(listenAddress),
              nmigrate_disks, migrate_disks, nbdPort, flags, NULLSTR(dname),
              resource);

    if (flags & VIR_MIGRATE_TUNNELLED && uri) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("migration URI is not supported by tunnelled "
                         "migration"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_TUNNELLED && listenAddress) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("listen address is not supported by tunnelled "
                         "migration"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_TUNNELLED && nbdPort) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("disk port address is not supported by tunnelled "
                         "migration"));
        goto cleanup;
    }

    /* the order of operations is important here; we make sure the
     * destination side is completely setup before we touch the source
     */

    qemuDomainObjEnterRemote(vm);
    dconn = virConnectOpenAuth(dconnuri, &virConnectAuthConfig, 0);
    if (qemuDomainObjExitRemote(vm, !offline) < 0)
        goto cleanup;

    if (dconn == NULL) {
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("Failed to connect to remote libvirt URI %s: %s"),
                       dconnuri, virGetLastErrorMessage());
        virObjectUnref(cfg);
        return -1;
    }

    if (virConnectSetKeepAlive(dconn, cfg->keepAliveInterval,
                               cfg->keepAliveCount) < 0)
        goto cleanup;

    if (virConnectRegisterCloseCallback(dconn, qemuMigrationSrcConnectionClosed,
                                        vm, NULL) < 0) {
        goto cleanup;
    }

    qemuDomainObjEnterRemote(vm);
    p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                   VIR_DRV_FEATURE_MIGRATION_P2P);
    /* v3proto reflects whether the caller used Perform3, but with
     * p2p migration, regardless of whether Perform2 or Perform3
     * was used, we decide the protocol based on what the target supports.
     */
    *v3proto = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                        VIR_DRV_FEATURE_MIGRATION_V3);
    useParams = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                         VIR_DRV_FEATURE_MIGRATION_PARAMS);
    if (offline)
        dstOffline = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                              VIR_DRV_FEATURE_MIGRATION_OFFLINE);
    if (qemuDomainObjExitRemote(vm, !offline) < 0)
        goto cleanup;

    if (!p2p) {
        virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                       _("Destination libvirt does not support peer-to-peer migration protocol"));
        goto cleanup;
    }

    /* Only xmlin, dname, uri, and bandwidth parameters can be used with
     * old-style APIs. */
    if (!useParams && (graphicsuri || listenAddress || nmigrate_disks)) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("Migration APIs with extensible parameters are not "
                         "supported but extended parameters were passed"));
        goto cleanup;
    }

    if (offline && !dstOffline) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("offline migration is not supported by "
                         "the destination host"));
        goto cleanup;
    }

    /* Change protection is only required on the source side (us), and
     * only for v3 migration when begin and perform are separate jobs.
     * But peer-2-peer is already a single job, and we still want to
     * talk to older destinations that would reject the flag.
     * Therefore it is safe to clear the bit here.  */
    flags &= ~VIR_MIGRATE_CHANGE_PROTECTION;

    if (*v3proto) {
        ret = qemuMigrationSrcPerformPeer2Peer3(driver, sconn, dconn, dconnuri, vm, xmlin,
                                                persist_xml, dname, uri, graphicsuri,
                                                listenAddress, nmigrate_disks, migrate_disks,
                                                nbdPort, migParams, resource,
                                                useParams, flags);
    } else {
        ret = qemuMigrationSrcPerformPeer2Peer2(driver, sconn, dconn, vm,
                                                dconnuri, flags, dname, resource,
                                                migParams);
    }

 cleanup:
    orig_err = virSaveLastError();
    qemuDomainObjEnterRemote(vm);
    virConnectUnregisterCloseCallback(dconn, qemuMigrationSrcConnectionClosed);
    virObjectUnref(dconn);
    ignore_value(qemuDomainObjExitRemote(vm, false));
    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    virObjectUnref(cfg);
    return ret;
}


/*
 * This implements perform part of the migration protocol when migration job
 * does not need to be active across several APIs, i.e., peer2peer migration or
 * perform phase of v2 non-peer2peer migration.
 */
static int
qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
                           virConnectPtr conn,
                           virDomainObjPtr vm,
                           const char *xmlin,
                           const char *persist_xml,
                           const char *dconnuri,
                           const char *uri,
                           const char *graphicsuri,
                           const char *listenAddress,
                           size_t nmigrate_disks,
                           const char **migrate_disks,
                           int nbdPort,
                           qemuMigrationParamsPtr migParams,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           unsigned long flags,
                           const char *dname,
                           unsigned long resource,
                           bool v3proto)
{
    virObjectEventPtr event = NULL;
    int ret = -1;
    virErrorPtr orig_err = NULL;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                              flags) < 0)
        goto cleanup;

    if (!(flags & VIR_MIGRATE_OFFLINE) && virDomainObjCheckActive(vm) < 0)
        goto endjob;

    if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
        goto endjob;

    if (!(flags & (VIR_MIGRATE_UNSAFE | VIR_MIGRATE_OFFLINE)) &&
        !qemuMigrationSrcIsSafe(vm->def, priv->qemuCaps,
                                nmigrate_disks, migrate_disks, flags))
        goto endjob;

    qemuMigrationSrcStoreDomainState(vm);

    if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
        ret = qemuMigrationSrcPerformPeer2Peer(driver, conn, vm, xmlin, persist_xml,
                                               dconnuri, uri, graphicsuri, listenAddress,
                                               nmigrate_disks, migrate_disks, nbdPort,
                                               migParams, flags, dname, resource,
                                               &v3proto);
    } else {
        qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
        ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen,
                                            cookieout, cookieoutlen,
                                            flags, resource, NULL, NULL, 0, NULL,
                                            migParams);
    }
    if (ret < 0)
        goto endjob;

    /*
     * In v3 protocol, the source VM is not killed off until the
     * confirm step.
     */
    if (!v3proto) {
        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
                        QEMU_ASYNC_JOB_MIGRATION_OUT,
                        VIR_QEMU_PROCESS_STOP_MIGRATED);
        virDomainAuditStop(vm, "migrated");
        event = virDomainEventLifecycleNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
    }

 endjob:
    if (ret < 0)
        orig_err = virSaveLastError();

    /* v2 proto has no confirm phase so we need to reset migration parameters
     * here
     */
    if (!v3proto && ret < 0)
        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 priv->job.migParams, priv->job.apiFlags);

    qemuMigrationSrcRestoreDomainState(driver, vm);

    qemuMigrationJobFinish(driver, vm);
    if (!virDomainObjIsActive(vm) && ret == 0) {
        if (flags & VIR_MIGRATE_UNDEFINE_SOURCE) {
            virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
            vm->persistent = 0;
        }
        qemuDomainRemoveInactiveJob(driver, vm);
    }

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

 cleanup:
    virObjectEventStateQueue(driver->domainEventState, event);
    virObjectUnref(cfg);
    return ret;
}

/*
 * This implements perform phase of v3 migration protocol.
 */
static int
qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
                             virConnectPtr conn,
                             virDomainObjPtr vm,
                             const char *persist_xml,
                             const char *uri,
                             const char *graphicsuri,
                             size_t nmigrate_disks,
                             const char **migrate_disks,
                             qemuMigrationParamsPtr migParams,
                             const char *cookiein,
                             int cookieinlen,
                             char **cookieout,
                             int *cookieoutlen,
                             unsigned long flags,
                             unsigned long resource)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;

    /* If we didn't start the job in the begin phase, start it now. */
    if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                  flags) < 0)
            goto cleanup;
    } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
        goto cleanup;
    }

    qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
    virCloseCallbacksUnset(driver->closeCallbacks, vm,
                           qemuMigrationSrcCleanup);

    ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen,
                                        cookieout, cookieoutlen,
                                        flags, resource, NULL, graphicsuri,
                                        nmigrate_disks, migrate_disks, migParams);

    if (ret < 0) {
        qemuMigrationSrcRestoreDomainState(driver, vm);
        goto endjob;
    }

    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE);

    if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
                             qemuMigrationSrcCleanup) < 0)
        goto endjob;

 endjob:
    if (ret < 0) {
        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 priv->job.migParams, priv->job.apiFlags);
        qemuMigrationJobFinish(driver, vm);
    } else {
        qemuMigrationJobContinue(vm);
    }

    if (!virDomainObjIsActive(vm))
        qemuDomainRemoveInactiveJob(driver, vm);

 cleanup:
    return ret;
}

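/* Top-level entry point for the perform step.  Peer-to-peer and tunnelled
 * migrations are handled as a single job by qemuMigrationSrcPerformJob(),
 * while plain v3 migration runs only the perform phase here and keeps the
 * job open for the subsequent confirm phase.
 */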
int
qemuMigrationSrcPerform(virQEMUDriverPtr driver,
                        virConnectPtr conn,
                        virDomainObjPtr vm,
                        const char *xmlin,
                        const char *persist_xml,
                        const char *dconnuri,
                        const char *uri,
                        const char *graphicsuri,
                        const char *listenAddress,
                        size_t nmigrate_disks,
                        const char **migrate_disks,
                        int nbdPort,
                        qemuMigrationParamsPtr migParams,
                        const char *cookiein,
                        int cookieinlen,
                        char **cookieout,
                        int *cookieoutlen,
                        unsigned long flags,
                        const char *dname,
                        unsigned long resource,
                        bool v3proto)
{
    VIR_DEBUG("driver=%p, conn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
              "uri=%s, graphicsuri=%s, listenAddress=%s, "
              "nmigrate_disks=%zu, migrate_disks=%p, nbdPort=%d, "
              "cookiein=%s, cookieinlen=%d, cookieout=%p, cookieoutlen=%p, "
              "flags=0x%lx, dname=%s, resource=%lu, v3proto=%d",
              driver, conn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
              NULLSTR(uri), NULLSTR(graphicsuri), NULLSTR(listenAddress),
              nmigrate_disks, migrate_disks, nbdPort,
              NULLSTR(cookiein), cookieinlen, cookieout, cookieoutlen,
              flags, NULLSTR(dname), resource, v3proto);

    if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
        if (cookieinlen) {
            virReportError(VIR_ERR_OPERATION_INVALID,
                           "%s", _("received unexpected cookie with P2P migration"));
            return -1;
        }

        return qemuMigrationSrcPerformJob(driver, conn, vm, xmlin, persist_xml, dconnuri, uri,
                                          graphicsuri, listenAddress,
                                          nmigrate_disks, migrate_disks, nbdPort,
                                          migParams,
                                          cookiein, cookieinlen,
                                          cookieout, cookieoutlen,
                                          flags, dname, resource, v3proto);
    } else {
        if (dconnuri) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("Unexpected dconnuri parameter with non-peer2peer migration"));
            return -1;
        }

        if (v3proto) {
            return qemuMigrationSrcPerformPhase(driver, conn, vm, persist_xml, uri,
                                                graphicsuri,
                                                nmigrate_disks, migrate_disks,
                                                migParams,
                                                cookiein, cookieinlen,
                                                cookieout, cookieoutlen,
                                                flags, resource);
        } else {
            return qemuMigrationSrcPerformJob(driver, conn, vm, xmlin, persist_xml, NULL,
                                              uri, graphicsuri, listenAddress,
                                              nmigrate_disks, migrate_disks, nbdPort,
                                              migParams,
                                              cookiein, cookieinlen,
                                              cookieout, cookieoutlen, flags,
                                              dname, resource, v3proto);
        }
    }
}

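/* Associate port profiles of direct (macvtap) interfaces with the
 * destination host during Finish.  If any association fails, the ones
 * already established are rolled back before reporting the error.
 */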
static int
qemuMigrationDstVPAssociatePortProfiles(virDomainDefPtr def)
{
    size_t i;
    int last_good_net = -1;
    virDomainNetDefPtr net;

    for (i = 0; i < def->nnets; i++) {
        net = def->nets[i];
        if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
            if (virNetDevVPortProfileAssociate(net->ifname,
                                               virDomainNetGetActualVirtPortProfile(net),
                                               &net->mac,
                                               virDomainNetGetActualDirectDev(net),
                                               -1,
                                               def->uuid,
                                               VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH,
                                               false) < 0) {
                virReportError(VIR_ERR_OPERATION_FAILED,
                               _("Port profile Associate failed for %s"),
                               net->ifname);
                goto err_exit;
            }
            last_good_net = i;
            VIR_DEBUG("Port profile Associate succeeded for %s", net->ifname);

            if (virNetDevMacVLanVPortProfileRegisterCallback(net->ifname, &net->mac,
                                                             virDomainNetGetActualDirectDev(net), def->uuid,
                                                             virDomainNetGetActualVirtPortProfile(net),
                                                             VIR_NETDEV_VPORT_PROFILE_OP_CREATE))
                goto err_exit;
        }
    }

    return 0;

 err_exit:
    for (i = 0; last_good_net != -1 && i <= last_good_net; i++) {
        net = def->nets[i];
        if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
            ignore_value(virNetDevVPortProfileDisassociate(net->ifname,
                                                           virDomainNetGetActualVirtPortProfile(net),
                                                           &net->mac,
                                                           virDomainNetGetActualDirectDev(net),
                                                           -1,
                                                           VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH));
        }
    }
    return -1;
}


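/* Make the incoming domain persistent on the destination, using the
 * persistent definition carried in the migration cookie.  On failure the
 * previous persistence state and definition are restored, unless
 * @ignoreSaveError tells us to tolerate a failed config save.
 */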
static int
qemuMigrationDstPersist(virQEMUDriverPtr driver,
                        virDomainObjPtr vm,
                        qemuMigrationCookiePtr mig,
                        bool ignoreSaveError)
{
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virCapsPtr caps = NULL;
    virDomainDefPtr vmdef;
    virDomainDefPtr oldDef = NULL;
    unsigned int oldPersist = vm->persistent;
    virObjectEventPtr event;
    int ret = -1;

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    vm->persistent = 1;
    oldDef = vm->newDef;
    vm->newDef = qemuMigrationCookieGetPersistent(mig);

    if (!(vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm,
                                               priv->qemuCaps)))
        goto error;

    if (virDomainSaveConfig(cfg->configDir, driver->caps, vmdef) < 0 &&
        !ignoreSaveError)
        goto error;

    event = virDomainEventLifecycleNewFromObj(vm,
                                              VIR_DOMAIN_EVENT_DEFINED,
                                              oldPersist ?
                                              VIR_DOMAIN_EVENT_DEFINED_UPDATED :
                                              VIR_DOMAIN_EVENT_DEFINED_ADDED);
    virObjectEventStateQueue(driver->domainEventState, event);

    ret = 0;

 cleanup:
    virDomainDefFree(oldDef);
    virObjectUnref(caps);
    virObjectUnref(cfg);
    return ret;

 error:
    virDomainDefFree(vm->newDef);
    vm->persistent = oldPersist;
    vm->newDef = oldDef;
    oldDef = NULL;
    goto cleanup;
}


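/* Finish phase on the destination host: consume the migration cookie, make
 * the domain persistent if requested, wait for the migration stream to be
 * fully processed, resume vCPUs unless VIR_MIGRATE_PAUSED was given, and
 * emit the appropriate lifecycle events.  Returning NULL tells the source
 * that the domain is not running here and may need to be resumed there.
 */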
virDomainPtr
qemuMigrationDstFinish(virQEMUDriverPtr driver,
                       virConnectPtr dconn,
                       virDomainObjPtr vm,
                       const char *cookiein,
                       int cookieinlen,
                       char **cookieout,
                       int *cookieoutlen,
                       unsigned long flags,
                       int retcode,
                       bool v3proto)
{
    virDomainPtr dom = NULL;
    qemuMigrationCookiePtr mig = NULL;
    virErrorPtr orig_err = NULL;
    int cookie_flags = 0;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    unsigned short port;
    unsigned long long timeReceived = 0;
    virObjectEventPtr event;
    qemuDomainJobInfoPtr jobInfo = NULL;
    bool inPostCopy = false;
    bool doKill = true;

    VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, retcode=%d",
              driver, dconn, vm, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, retcode);

    port = priv->migrationPort;
    priv->migrationPort = 0;

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) {
        qemuMigrationDstErrorReport(driver, vm->def->name);
        goto cleanup;
    }

    ignore_value(virTimeMillisNow(&timeReceived));

    qemuMigrationJobStartPhase(driver, vm,
                               v3proto ? QEMU_MIGRATION_PHASE_FINISH3
                                       : QEMU_MIGRATION_PHASE_FINISH2);

    qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup);
    VIR_FREE(priv->job.completed);

    cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK |
                   QEMU_MIGRATION_COOKIE_STATS |
                   QEMU_MIGRATION_COOKIE_NBD;
    /* Some older versions of libvirt always send persistent XML in the cookie
     * even though VIR_MIGRATE_PERSIST_DEST was not used. */
    cookie_flags |= QEMU_MIGRATION_COOKIE_PERSISTENT;

    if (!(mig = qemuMigrationEatCookie(driver, vm->def, priv->origname, priv,
                                       cookiein, cookieinlen, cookie_flags)))
        goto endjob;

    if (flags & VIR_MIGRATE_OFFLINE) {
        if (retcode == 0 &&
            qemuMigrationDstPersist(driver, vm, mig, false) == 0)
            dom = virGetDomain(dconn, vm->def->name, vm->def->uuid, -1);
        goto endjob;
    }

    if (retcode != 0) {
        /* Check for a possible error on the monitor in case Finish was called
         * earlier than monitor EOF handler got a chance to process the error
         */
        qemuDomainCheckMonitor(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
        goto endjob;
    }

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        qemuMigrationDstErrorReport(driver, vm->def->name);
        goto endjob;
    }

    if (qemuMigrationDstVPAssociatePortProfiles(vm->def) < 0)
        goto endjob;

    if (mig->network && qemuMigrationDstOPDRelocate(driver, vm, mig) < 0)
        VIR_WARN("unable to provide network data for relocation");

    if (qemuMigrationDstStopNBDServer(driver, vm, mig) < 0)
        goto endjob;

    if (qemuRefreshVirtioChannelState(driver, vm,
                                      QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        goto endjob;

    if (qemuConnectAgent(driver, vm) < 0)
        goto endjob;

    if (flags & VIR_MIGRATE_PERSIST_DEST) {
        if (qemuMigrationDstPersist(driver, vm, mig, !v3proto) < 0) {
            /* Hmpf.  Migration was successful, but making it persistent
             * was not.  If we report successful, then when this domain
             * shuts down, management tools are in for a surprise.  On the
             * other hand, if we report failure, then the management tools
             * might try to restart the domain on the source side, even
             * though the domain is actually running on the destination.
             * Pretend success and hope that this is a rare situation and
             * management tools are smart.
             *
             * However, in v3 protocol, the source VM is still available
             * to restart during confirm() step, so we kill it off now.
             */
            if (v3proto)
                goto endjob;
        }
    }

    /* We need to wait for QEMU to process all data sent by the source
     * before starting guest CPUs.
     */
    if (qemuMigrationDstWaitForCompletion(driver, vm,
                                          QEMU_ASYNC_JOB_MIGRATION_IN,
                                          !!(flags & VIR_MIGRATE_POSTCOPY)) < 0) {
        /* There's not much we can do for v2 protocol since the
         * original domain on the source host is already gone.
         */
        if (v3proto)
            goto endjob;
    }

    /* Now that the state data was transferred we can refresh the actual state
     * of the devices */
    if (qemuProcessRefreshState(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
        /* Similarly to the case above v2 protocol will not be able to recover
         * from this. Let's ignore this and perhaps stuff will not break. */
        if (v3proto)
            goto endjob;
    }

    if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY)
        inPostCopy = true;

    if (!(flags & VIR_MIGRATE_PAUSED)) {
        /* run 'cont' on the destination, which allows migration on qemu
         * >= 0.10.6 to work properly.  This isn't strictly necessary on
         * older QEMU versions, but it also doesn't hurt anything there
         */
        if (qemuProcessStartCPUs(driver, vm,
                                 inPostCopy ? VIR_DOMAIN_RUNNING_POSTCOPY
                                            : VIR_DOMAIN_RUNNING_MIGRATED,
                                 QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
            if (virGetLastErrorCode() == VIR_ERR_OK)
                virReportError(VIR_ERR_INTERNAL_ERROR,
                               "%s", _("resume operation failed"));
            /* Need to save the current error, in case shutting
             * down the process overwrites it
             */
            orig_err = virSaveLastError();

            /*
             * In v3 protocol, the source VM is still available to
             * restart during confirm() step, so we kill it off
             * now.
             * In v2 protocol, the source is dead, so we leave
             * target in paused state, in case admin can fix
             * things up.
             */
            if (v3proto)
                goto endjob;
        }

        if (inPostCopy)
            doKill = false;
    }

    if (mig->jobInfo) {
        jobInfo = mig->jobInfo;
        mig->jobInfo = NULL;

        if (jobInfo->sent && timeReceived) {
            jobInfo->timeDelta = timeReceived - jobInfo->sent;
            jobInfo->received = timeReceived;
            jobInfo->timeDeltaSet = true;
        }
        qemuDomainJobInfoUpdateTime(jobInfo);
        qemuDomainJobInfoUpdateDowntime(jobInfo);
    }

    if (inPostCopy) {
        if (qemuMigrationDstWaitForCompletion(driver, vm,
                                              QEMU_ASYNC_JOB_MIGRATION_IN,
                                              false) < 0) {
            goto endjob;
        }
        if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
            virDomainObjSetState(vm,
                                 VIR_DOMAIN_RUNNING,
                                 VIR_DOMAIN_RUNNING_MIGRATED);
        }
    }

    dom = virGetDomain(dconn, vm->def->name, vm->def->uuid, vm->def->id);

    if (inPostCopy) {
        /* The only RESUME event during post-copy migration is triggered by
         * QEMU when the running domain moves from the source to the
         * destination host, but then the migration keeps running until all
         * modified memory is transferred from the source host. This will
         * result in VIR_DOMAIN_EVENT_RESUMED with RESUMED_POSTCOPY detail.
         * However, our API documentation says we need to fire another RESUMED
         * event at the very end of migration with RESUMED_MIGRATED detail.
         */
        event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_RESUMED,
                                                  VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
        virObjectEventStateQueue(driver->domainEventState, event);
    }

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER);
        event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_SUSPENDED,
                                                  VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
        virObjectEventStateQueue(driver->domainEventState, event);
    }

    if (virDomainObjIsActive(vm) &&
        virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
        VIR_WARN("Failed to save status on vm %s", vm->def->name);

    /* Guest is successfully running, so cancel previous auto destroy */
    qemuProcessAutoDestroyRemove(driver, vm);

 endjob:
    if (!dom &&
        !(flags & VIR_MIGRATE_OFFLINE) &&
        virDomainObjIsActive(vm)) {
        if (doKill) {
            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                            QEMU_ASYNC_JOB_MIGRATION_IN,
                            VIR_QEMU_PROCESS_STOP_MIGRATED);
            virDomainAuditStop(vm, "failed");
            event = virDomainEventLifecycleNewFromObj(vm,
                                VIR_DOMAIN_EVENT_STOPPED,
                                VIR_DOMAIN_EVENT_STOPPED_FAILED);
            virObjectEventStateQueue(driver->domainEventState, event);
        } else {
            qemuMigrationAnyPostcopyFailed(driver, vm);
        }
    }

    if (dom) {
        if (jobInfo) {
            VIR_STEAL_PTR(priv->job.completed, jobInfo);
            priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
            priv->job.completed->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
        }

        if (qemuMigrationBakeCookie(mig, driver, vm,
                                    QEMU_MIGRATION_DESTINATION,
                                    cookieout, cookieoutlen,
                                    QEMU_MIGRATION_COOKIE_STATS) < 0)
            VIR_WARN("Unable to encode migration cookie");

        /* Remove completed stats for post-copy, everything but timing fields
         * is obsolete anyway.
         */
        if (inPostCopy)
            VIR_FREE(priv->job.completed);
    }

    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                             priv->job.migParams, priv->job.apiFlags);

    qemuMigrationJobFinish(driver, vm);
    if (!virDomainObjIsActive(vm))
        qemuDomainRemoveInactiveJob(driver, vm);

 cleanup:
    VIR_FREE(jobInfo);
    virPortAllocatorRelease(port);
    if (priv->mon)
        qemuMonitorSetDomainLog(priv->mon, NULL, NULL, NULL);
    VIR_FREE(priv->origname);
    virDomainObjEndAPI(&vm);
    qemuMigrationCookieFree(mig);
    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    virObjectUnref(cfg);

    /* Set a special error if Finish is expected to return NULL as a result of
     * successful call with retcode != 0
     */
    if (retcode != 0 && !dom && virGetLastErrorCode() == VIR_ERR_OK)
        virReportError(VIR_ERR_MIGRATE_FINISH_OK, NULL);
    return dom;
}


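/* Used by the save/dump code paths: streams the domain state to @fd using
 * QEMU's fd: migration support, optionally piping the data through an
 * external @compressor command run via virCommand.
 */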
/* Helper function called while vm is active.  */
int
qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
                       int fd,
                       const char *compressor,
                       qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int rc;
    int ret = -1;
    virCommandPtr cmd = NULL;
    int pipeFD[2] = { -1, -1 };
    unsigned long saveMigBandwidth = priv->migMaxBandwidth;
    char *errbuf = NULL;
    virErrorPtr orig_err = NULL;

    /* Increase migration bandwidth to unlimited since target is a file.
     * Failure to change migration speed is not fatal. */
    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
        qemuMonitorSetMigrationSpeed(priv->mon,
                                     QEMU_DOMAIN_MIG_BANDWIDTH_MAX);
        priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
        if (qemuDomainObjExitMonitor(driver, vm) < 0)
            return -1;
    }

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        /* nothing to tear down */
        return -1;
    }

    if (compressor && pipe(pipeFD) < 0) {
        virReportSystemError(errno, "%s",
                             _("Failed to create pipe for migration"));
        return -1;
    }

    /* All right! We can use fd migration, which means that qemu
     * doesn't have to open() the file, so while we still have to
     * grant SELinux access, we can do it on fd and avoid cleanup
     * later, as well as skip futzing with cgroup.  */
    if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def,
                                    compressor ? pipeFD[1] : fd) < 0)
        goto cleanup;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        goto cleanup;

    if (!compressor) {
        rc = qemuMonitorMigrateToFd(priv->mon,
                                    QEMU_MONITOR_MIGRATE_BACKGROUND,
                                    fd);
    } else {
        const char *prog = compressor;
        const char *args[] = {
            prog,
            "-c",
            NULL
        };

        cmd = virCommandNewArgs(args);
        virCommandSetInputFD(cmd, pipeFD[0]);
        virCommandSetOutputFD(cmd, &fd);
        virCommandSetErrorBuffer(cmd, &errbuf);
        virCommandDoAsyncIO(cmd);
        if (virSetCloseExec(pipeFD[1]) < 0) {
            virReportSystemError(errno, "%s",
                                 _("Unable to set cloexec flag"));
            ignore_value(qemuDomainObjExitMonitor(driver, vm));
            goto cleanup;
        }
        if (virCommandRunAsync(cmd, NULL) < 0) {
            ignore_value(qemuDomainObjExitMonitor(driver, vm));
            goto cleanup;
        }
        rc = qemuMonitorMigrateToFd(priv->mon,
                                    QEMU_MONITOR_MIGRATE_BACKGROUND,
                                    pipeFD[1]);
        if (VIR_CLOSE(pipeFD[0]) < 0 ||
            VIR_CLOSE(pipeFD[1]) < 0)
            VIR_WARN("failed to close intermediate pipe");
    }
    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        goto cleanup;
    if (rc < 0)
        goto cleanup;

    rc = qemuMigrationSrcWaitForCompletion(driver, vm, asyncJob, NULL, 0);

    if (rc < 0) {
        if (rc == -2) {
            orig_err = virSaveLastError();
            virCommandAbort(cmd);
            if (virDomainObjIsActive(vm) &&
                qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
                qemuMonitorMigrateCancel(priv->mon);
                ignore_value(qemuDomainObjExitMonitor(driver, vm));
            }
        }
        goto cleanup;
    }

    if (cmd && virCommandWait(cmd, NULL) < 0)
        goto cleanup;

    qemuDomainEventEmitJobCompleted(driver, vm);
    ret = 0;

 cleanup:
    if (ret < 0 && !orig_err)
        orig_err = virSaveLastError();

    /* Restore max migration bandwidth */
    if (virDomainObjIsActive(vm) &&
        qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
        qemuMonitorSetMigrationSpeed(priv->mon, saveMigBandwidth);
        priv->migMaxBandwidth = saveMigBandwidth;
        ignore_value(qemuDomainObjExitMonitor(driver, vm));
    }

    VIR_FORCE_CLOSE(pipeFD[0]);
    VIR_FORCE_CLOSE(pipeFD[1]);
    if (cmd) {
        VIR_DEBUG("Compression binary stderr: %s", NULLSTR(errbuf));
        VIR_FREE(errbuf);
        virCommandFree(cmd);
    }

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

    return ret;
}


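/* Cancel an unfinished outgoing migration, including any NBD storage
 * mirroring jobs started for non-shared disks; used, e.g., when recovering
 * a domain whose migration job was interrupted.
 */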
int
qemuMigrationSrcCancel(virQEMUDriverPtr driver,
                       virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    bool storage = false;
    size_t i;
    int ret = -1;

    VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
              vm->def->name);

    qemuDomainObjEnterMonitor(driver, vm);
    ignore_value(qemuMonitorMigrateCancel(priv->mon));
    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        goto cleanup;

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        qemuBlockJobDataPtr job;

        if (!(job = qemuBlockJobDiskGetJob(disk)) ||
            !qemuBlockJobIsRunning(job))
            diskPriv->migrating = false;

        if (diskPriv->migrating) {
            qemuBlockJobSyncBegin(job);
            storage = true;
        }

        virObjectUnref(job);
    }

    if (storage &&
        qemuMigrationSrcNBDCopyCancel(driver, vm, false,
                                      QEMU_ASYNC_JOB_NONE, NULL) < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    return ret;
}


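/* Migration job control helpers.  A migration runs as an asynchronous job
 * so that only a restricted set of regular jobs may interleave with it; the
 * allowed mask differs for incoming and outgoing migration.  A typical
 * outgoing sequence looks like this (illustrative sketch only):
 *
 *     qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, flags);
 *     qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
 *     ...
 *     qemuMigrationJobFinish(driver, vm);
 */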
static int
qemuMigrationJobStart(virQEMUDriverPtr driver,
                      virDomainObjPtr vm,
                      qemuDomainAsyncJob job,
                      unsigned long apiFlags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virDomainJobOperation op;
    unsigned long long mask;

    if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
        op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN;
        mask = QEMU_JOB_NONE;
    } else {
        op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT;
        mask = QEMU_JOB_DEFAULT_MASK |
               JOB_MASK(QEMU_JOB_SUSPEND) |
               JOB_MASK(QEMU_JOB_MIGRATION_OP);
    }

    if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0)
        return -1;

    priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;

    qemuDomainObjSetAsyncJobMask(vm, mask);
    return 0;
}

static void
qemuMigrationJobSetPhase(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         qemuMigrationJobPhase phase)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (phase < priv->job.phase) {
        VIR_ERROR(_("migration protocol going backwards %s => %s"),
                  qemuMigrationJobPhaseTypeToString(priv->job.phase),
                  qemuMigrationJobPhaseTypeToString(phase));
        return;
    }

    qemuDomainObjSetJobPhase(driver, vm, phase);
}

static void
qemuMigrationJobStartPhase(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           qemuMigrationJobPhase phase)
{
    qemuMigrationJobSetPhase(driver, vm, phase);
}

static void
qemuMigrationJobContinue(virDomainObjPtr vm)
{
    qemuDomainObjReleaseAsyncJob(vm);
}

static bool
qemuMigrationJobIsActive(virDomainObjPtr vm,
                         qemuDomainAsyncJob job)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (priv->job.asyncJob != job) {
        const char *msg;

        if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
            msg = _("domain '%s' is not processing incoming migration");
        else
            msg = _("domain '%s' is not being migrated");

        virReportError(VIR_ERR_OPERATION_INVALID, msg, vm->def->name);
        return false;
    }
    return true;
}

static void
qemuMigrationJobFinish(virQEMUDriverPtr driver, virDomainObjPtr vm)
{
    qemuDomainObjEndAsyncJob(driver, vm);
}


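/* Tracking of incoming migration errors.  When an incoming migration fails
 * on this host, the error is stored in a hash table keyed by domain name so
 * that a later Finish call can report it back to the source.  Illustrative
 * usage sketch:
 *
 *     virErrorPtr err = virSaveLastError();
 *     qemuMigrationDstErrorSave(driver, vm->def->name, err);
 *     ...
 *     qemuMigrationDstErrorReport(driver, vm->def->name);
 */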
static void
qemuMigrationDstErrorFree(void *data,
                       const void *name ATTRIBUTE_UNUSED)
{
    virErrorPtr err = data;
    virFreeError(err);
}

int
qemuMigrationDstErrorInit(virQEMUDriverPtr driver)
{
    driver->migrationErrors = virHashAtomicNew(64, qemuMigrationDstErrorFree);
    if (driver->migrationErrors)
        return 0;
    else
        return -1;
}

/**
 * This function consumes @err; the caller should consider the @err pointer
 * invalid after calling this function.
 */
void
qemuMigrationDstErrorSave(virQEMUDriverPtr driver,
                          const char *name,
                          virErrorPtr err)
{
    if (!err)
        return;

    VIR_DEBUG("Saving incoming migration error for domain %s: %s",
              name, err->message);
    if (virHashAtomicUpdate(driver->migrationErrors, name, err) < 0) {
        VIR_WARN("Failed to save migration error for domain '%s'", name);
        virFreeError(err);
    }
}

void
qemuMigrationDstErrorReport(virQEMUDriverPtr driver,
                            const char *name)
{
    virErrorPtr err;

    if (!(err = virHashAtomicSteal(driver->migrationErrors, name)))
        return;

    VIR_DEBUG("Restoring saved incoming migration error for domain %s: %s",
              name, err->message);
    virSetError(err);
    virFreeError(err);
}


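/* Aggregate the progress of NBD storage mirroring jobs (drive-mirror) used
 * to copy non-shared disks during migration, and store the totals in
 * @jobInfo->mirrorStats so they can be reported alongside the RAM stats.
 */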
int
qemuMigrationSrcFetchMirrorStats(virQEMUDriverPtr driver,
                                 virDomainObjPtr vm,
                                 qemuDomainAsyncJob asyncJob,
                                 qemuDomainJobInfoPtr jobInfo)
{
    size_t i;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    bool nbd = false;
    virHashTablePtr blockinfo = NULL;
    qemuDomainMirrorStatsPtr stats = &jobInfo->mirrorStats;

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        if (QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating) {
            nbd = true;
            break;
        }
    }

    if (!nbd)
        return 0;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    blockinfo = qemuMonitorGetAllBlockJobInfo(priv->mon, false);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockinfo)
        return -1;

    memset(stats, 0, sizeof(*stats));

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        qemuMonitorBlockJobInfoPtr data;

        if (!diskPriv->migrating ||
            !(data = virHashLookup(blockinfo, disk->info.alias)))
            continue;

        stats->transferred += data->cur;
        stats->total += data->end;
    }

    virHashFree(blockinfo);
    return 0;
}