/*
 * qemu_migration.c: QEMU migration handling
 *
 * Copyright (C) 2006-2015 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 */

#include <config.h>

#include <netdb.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <fcntl.h>
#include <poll.h>

#include "qemu_migration.h"
#include "qemu_migration_cookie.h"
#include "qemu_migration_params.h"
#include "qemu_monitor.h"
#include "qemu_domain.h"
#include "qemu_process.h"
#include "qemu_capabilities.h"
#include "qemu_alias.h"
#include "qemu_cgroup.h"
#include "qemu_hotplug.h"
#include "qemu_blockjob.h"
#include "qemu_security.h"
#include "qemu_block.h"

#include "domain_audit.h"
#include "virlog.h"
#include "virerror.h"
#include "viralloc.h"
#include "virfile.h"
#include "virnetdevopenvswitch.h"
#include "datatypes.h"
#include "virfdstream.h"
#include "viruuid.h"
#include "virtime.h"
#include "locking/domain_lock.h"
#include "rpc/virnetsocket.h"
#include "virstoragefile.h"
#include "viruri.h"
#include "virhook.h"
#include "virstring.h"
#include "virtypedparam.h"
#include "virprocess.h"
#include "nwfilter_conf.h"

#define VIR_FROM_THIS VIR_FROM_QEMU

VIR_LOG_INIT("qemu.qemu_migration");

VIR_ENUM_IMPL(qemuMigrationJobPhase, QEMU_MIGRATION_PHASE_LAST,
              "none",
              "perform2",
              "begin3",
              "perform3",
              "perform3_done",
              "confirm3_cancelled",
              "confirm3",
              "prepare",
              "finish2",
              "finish3",
);

static int
qemuMigrationJobStart(virQEMUDriverPtr driver,
                      virDomainObjPtr vm,
                      qemuDomainAsyncJob job,
                      unsigned long apiFlags)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;

static void
qemuMigrationJobSetPhase(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         qemuMigrationJobPhase phase)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);

static void
qemuMigrationJobStartPhase(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           qemuMigrationJobPhase phase)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);

static void
qemuMigrationJobContinue(virDomainObjPtr obj)
    ATTRIBUTE_NONNULL(1);

static bool
qemuMigrationJobIsActive(virDomainObjPtr vm,
                         qemuDomainAsyncJob job)
    ATTRIBUTE_NONNULL(1);

static void
qemuMigrationJobFinish(virQEMUDriverPtr driver,
                       virDomainObjPtr obj)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);

static void
qemuMigrationSrcStoreDomainState(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    priv->preMigrationState = virDomainObjGetState(vm, NULL);

    VIR_DEBUG("Storing pre-migration state=%d domain=%p",
              priv->preMigrationState, vm);
}

/* Returns true if the domain was resumed, false otherwise */
static bool
qemuMigrationSrcRestoreDomainState(virQEMUDriverPtr driver, virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int reason;
    virDomainState state = virDomainObjGetState(vm, &reason);
    bool ret = false;

    VIR_DEBUG("driver=%p, vm=%p, pre-mig-state=%s, state=%s, reason=%s",
              driver, vm,
              virDomainStateTypeToString(priv->preMigrationState),
              virDomainStateTypeToString(state),
              virDomainStateReasonToString(state, reason));

    if (state != VIR_DOMAIN_PAUSED ||
        reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED)
        goto cleanup;

    if (priv->preMigrationState == VIR_DOMAIN_RUNNING) {
        /* This is basically the only restore possibility that's safe
         * and we should attempt to do */

        VIR_DEBUG("Restoring pre-migration state due to migration error");

        /* we got here through some sort of failure; start the domain again */
        if (qemuProcessStartCPUs(driver, vm,
                                 VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
                                 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
            /* Hm, we already know we are in error here.  We don't want to
             * overwrite the previous error, though, so we just throw something
             * to the logs and hope for the best */
            VIR_ERROR(_("Failed to resume guest %s after failure"), vm->def->name);
            goto cleanup;
        }
        ret = true;
    }

 cleanup:
    priv->preMigrationState = VIR_DOMAIN_NOSTATE;
    return ret;
}


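/**
 * qemuMigrationDstPrecreateDisk:
 * @conn: connection to the storage driver
 * @disk: disk definition of the disk to precreate
 * @capacity: required capacity of the new volume in bytes
 *
 * Pre-creates a storage volume on the destination so that a later NBD
 * storage copy has something to write into.  File-backed disks are created
 * in the pool owning the directory of their source path, volume-backed
 * disks in their configured pool; network disks, sourceless disks and
 * already existing volumes are skipped.  The volume XML handed to
 * virStorageVolCreateXML looks roughly like:
 *
 *   <volume>
 *     <name>disk-image-name</name>
 *     <capacity>...</capacity>
 *     <target>
 *       <format type='...'/>
 *     </target>
 *   </volume>
 *
 * Returns 0 on success (including when nothing needs to be created),
 * -1 otherwise.
 */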
static int
qemuMigrationDstPrecreateDisk(virConnectPtr conn,
                              virDomainDiskDefPtr disk,
                              unsigned long long capacity)
{
    int ret = -1;
    virStoragePoolPtr pool = NULL;
    virStorageVolPtr vol = NULL;
    char *volName = NULL, *basePath = NULL;
    char *volStr = NULL;
    virBuffer buf = VIR_BUFFER_INITIALIZER;
    const char *format = NULL;
    unsigned int flags = 0;

    VIR_DEBUG("Precreate disk type=%s", virStorageTypeToString(disk->src->type));

    switch ((virStorageType)disk->src->type) {
    case VIR_STORAGE_TYPE_FILE:
        if (!virDomainDiskGetSource(disk)) {
            VIR_DEBUG("Dropping sourceless disk '%s'",
                      disk->dst);
            return 0;
        }

        if (VIR_STRDUP(basePath, disk->src->path) < 0)
            goto cleanup;

        if (!(volName = strrchr(basePath, '/'))) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("malformed disk path: %s"),
                           disk->src->path);
            goto cleanup;
        }

        *volName = '\0';
        volName++;

        if (!(pool = virStoragePoolLookupByTargetPath(conn, basePath)))
            goto cleanup;
        format = virStorageFileFormatTypeToString(disk->src->format);
        if (disk->src->format == VIR_STORAGE_FILE_QCOW2)
            flags |= VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA;
        break;

    case VIR_STORAGE_TYPE_VOLUME:
        if (!(pool = virStoragePoolLookupByName(conn, disk->src->srcpool->pool)))
            goto cleanup;
        format = virStorageFileFormatTypeToString(disk->src->format);
        volName = disk->src->srcpool->volume;
        if (disk->src->format == VIR_STORAGE_FILE_QCOW2)
            flags |= VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA;
        break;

    case VIR_STORAGE_TYPE_NETWORK:
        VIR_DEBUG("Skipping creation of network disk '%s'",
                  disk->dst);
        return 0;

    case VIR_STORAGE_TYPE_BLOCK:
    case VIR_STORAGE_TYPE_DIR:
    case VIR_STORAGE_TYPE_NONE:
    case VIR_STORAGE_TYPE_LAST:
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("cannot precreate storage for disk type '%s'"),
                       virStorageTypeToString(disk->src->type));
        goto cleanup;
    }

    if ((vol = virStorageVolLookupByName(pool, volName))) {
        VIR_DEBUG("Skipping creation of already existing volume of name '%s'",
                  volName);
        ret = 0;
        goto cleanup;
    }

    virBufferAddLit(&buf, "<volume>\n");
    virBufferAdjustIndent(&buf, 2);
    virBufferEscapeString(&buf, "<name>%s</name>\n", volName);
    virBufferAsprintf(&buf, "<capacity>%llu</capacity>\n", capacity);
    virBufferAddLit(&buf, "<target>\n");
    virBufferAdjustIndent(&buf, 2);
    virBufferAsprintf(&buf, "<format type='%s'/>\n", format);
    virBufferAdjustIndent(&buf, -2);
    virBufferAddLit(&buf, "</target>\n");
    virBufferAdjustIndent(&buf, -2);
    virBufferAddLit(&buf, "</volume>\n");

    if (!(volStr = virBufferContentAndReset(&buf))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("unable to create volume XML"));
        goto cleanup;
    }

    if (!(vol = virStorageVolCreateXML(pool, volStr, flags)))
        goto cleanup;

    ret = 0;
 cleanup:
    VIR_FREE(basePath);
    VIR_FREE(volStr);
    virObjectUnref(vol);
    virObjectUnref(pool);
    return ret;
}

static bool
qemuMigrationAnyCopyDisk(virDomainDiskDef const *disk,
                         size_t nmigrate_disks, const char **migrate_disks)
{
    size_t i;

    /* Check if the disk alias is in the list */
    if (nmigrate_disks) {
        for (i = 0; i < nmigrate_disks; i++) {
            if (STREQ(disk->dst, migrate_disks[i]))
                return true;
        }
        return false;
    }

    /* Default is to migrate only non-shared non-readonly disks
     * with source */
    return !disk->src->shared && !disk->src->readonly &&
           !virStorageSourceIsEmpty(disk->src);
}


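/**
 * qemuMigrationDstPrecreateStorage:
 * @vm: domain object
 * @nbd: NBD data from the migration cookie (disk targets and capacities)
 * @nmigrate_disks: number of entries in @migrate_disks
 * @migrate_disks: optional list of disk targets selected for migration
 * @incremental: true if an incremental (top-only) storage copy was requested
 *
 * Walks the disks listed in the migration cookie and pre-creates missing
 * local storage via qemuMigrationDstPrecreateDisk.  Disks that are not
 * selected for migration or whose source already exists are left alone;
 * pre-creating targets for incremental storage migration is rejected.
 *
 * Returns 0 on success, -1 on error.
 */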
static int
qemuMigrationDstPrecreateStorage(virDomainObjPtr vm,
                                 qemuMigrationCookieNBDPtr nbd,
                                 size_t nmigrate_disks,
                                 const char **migrate_disks,
                                 bool incremental)
{
    int ret = -1;
    size_t i = 0;
    virConnectPtr conn;

    if (!nbd || !nbd->ndisks)
        return 0;

    if (!(conn = virGetConnectStorage()))
        return -1;

    for (i = 0; i < nbd->ndisks; i++) {
        virDomainDiskDefPtr disk;
        const char *diskSrcPath;

        VIR_DEBUG("Looking up disk target '%s' (capacity=%llu)",
                  nbd->disks[i].target, nbd->disks[i].capacity);

        if (!(disk = virDomainDiskByName(vm->def, nbd->disks[i].target,
                                         false))) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("unable to find disk by target: %s"),
                           nbd->disks[i].target);
            goto cleanup;
        }

        diskSrcPath = virDomainDiskGetSource(disk);

        /* Skip disks we don't want to migrate and already existing disks. */
        if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks) ||
            (diskSrcPath && virFileExists(diskSrcPath))) {
            continue;
        }

        if (incremental) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("pre-creation of storage targets for incremental "
                             "storage migration is not supported"));
            goto cleanup;
        }

        VIR_DEBUG("Proceeding with disk source %s", NULLSTR(diskSrcPath));

        if (qemuMigrationDstPrecreateDisk(conn, disk, nbd->disks[i].capacity) < 0)
            goto cleanup;
    }

    ret = 0;
 cleanup:
    virObjectUnref(conn);
    return ret;
}


/**
 * qemuMigrationDstStartNBDServer:
 * @driver: qemu driver
 * @vm: domain
 *
 * Starts NBD server. This is a newer method to copy
 * storage during migration than using 'blk' and 'inc'
 * arguments in 'migrate' monitor command.
 * Error is reported here.
 *
 * Returns 0 on success, -1 otherwise.
 */
static int
qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               const char *listenAddr,
                               size_t nmigrate_disks,
                               const char **migrate_disks,
                               int nbdPort,
                               const char *tls_alias)
{
    int ret = -1;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned short port = 0;
    char *diskAlias = NULL;
    size_t i;

    if (nbdPort < 0 || nbdPort > USHRT_MAX) {
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("nbd port must be in range 0-65535"));
        return -1;
    }

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];

        /* check whether disk should be migrated */
        if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
            continue;

        if (disk->src->readonly || virStorageSourceIsEmpty(disk->src)) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
                           _("Cannot migrate empty or read-only disk %s"),
                           disk->dst);
            goto cleanup;
        }

        VIR_FREE(diskAlias);
        if (!(diskAlias = qemuAliasDiskDriveFromDisk(disk)))
            goto cleanup;

        if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
            goto cleanup;

        if (port == 0) {
            if (nbdPort)
                port = nbdPort;
            else if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
                goto exit_monitor;

            if (qemuMonitorNBDServerStart(priv->mon, listenAddr, port, tls_alias) < 0)
                goto exit_monitor;
        }

        if (qemuMonitorNBDServerAdd(priv->mon, diskAlias, true) < 0)
            goto exit_monitor;
        if (qemuDomainObjExitMonitor(driver, vm) < 0)
            goto cleanup;
    }

    priv->nbdPort = port;
    ret = 0;

 cleanup:
    VIR_FREE(diskAlias);
    if (ret < 0 && nbdPort == 0)
        virPortAllocatorRelease(port);
    return ret;

 exit_monitor:
    ignore_value(qemuDomainObjExitMonitor(driver, vm));
    goto cleanup;
}


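/**
 * qemuMigrationDstStopNBDServer:
 * @driver: qemu driver
 * @vm: domain
 * @mig: migration cookie
 *
 * Stops the NBD server started by qemuMigrationDstStartNBDServer and
 * releases the port it was listening on.  Does nothing if the cookie
 * carries no NBD data.
 *
 * Returns 0 on success, -1 otherwise.
 */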
static int
qemuMigrationDstStopNBDServer(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              qemuMigrationCookiePtr mig)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (!mig->nbd)
        return 0;

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        return -1;

    if (qemuMonitorNBDServerStop(priv->mon) < 0)
        VIR_WARN("Unable to stop NBD server");
    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        return -1;

    virPortAllocatorRelease(priv->nbdPort);
    priv->nbdPort = 0;
    return 0;
}


/**
 * qemuMigrationSrcNBDStorageCopyReady:
 * @vm: domain
 *
 * Check the status of all drives copied via qemuMigrationSrcNBDStorageCopy.
 * Any pending block job events for the mirrored disks will be processed.
 *
 * Returns 1 if all mirrors are "ready",
 *         0 if some mirrors are still performing initial sync,
 *        -1 on error.
 */
static int
qemuMigrationSrcNBDStorageCopyReady(virDomainObjPtr vm,
                                    qemuDomainAsyncJob asyncJob)
{
    size_t i;
    size_t notReady = 0;
    int status;

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        char *error = NULL;

        if (!diskPriv->migrating)
            continue;

        status = qemuBlockJobUpdateDisk(vm, asyncJob, disk, &error);
        if (status == VIR_DOMAIN_BLOCK_JOB_FAILED) {
            if (error) {
                virReportError(VIR_ERR_OPERATION_FAILED,
                               _("migration of disk %s failed: %s"),
                               disk->dst, error);
                VIR_FREE(error);
            } else {
                virReportError(VIR_ERR_OPERATION_FAILED,
                               _("migration of disk %s failed"), disk->dst);
            }
            return -1;
        }
        VIR_FREE(error);

        if (disk->mirrorState != VIR_DOMAIN_DISK_MIRROR_STATE_READY)
            notReady++;
    }

    if (notReady) {
        VIR_DEBUG("Waiting for %zu disk mirrors to get ready", notReady);
        return 0;
    } else {
        VIR_DEBUG("All disk mirrors are ready");
        return 1;
    }
}


/*
 * If @check is true, the function will report an error and return a different
 * code in case a block job fails. This way we can properly abort migration in
 * case some block jobs failed once all memory has already been transferred.
 *
 * Returns 1 if all mirrors are gone,
 *         0 if some mirrors are still active,
 *         -1 some mirrors failed but some are still active,
 *         -2 all mirrors are gone but some of them failed.
 */
static int
qemuMigrationSrcNBDCopyCancelled(virDomainObjPtr vm,
                                 qemuDomainAsyncJob asyncJob,
                                 bool check)
{
    size_t i;
    size_t active = 0;
    size_t completed = 0;
    int status;
    bool failed = false;

 retry:
    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        char *error = NULL;

        if (!diskPriv->migrating)
            continue;

        status = qemuBlockJobUpdateDisk(vm, asyncJob, disk, &error);
        switch (status) {
        case VIR_DOMAIN_BLOCK_JOB_FAILED:
            if (check) {
                if (error) {
                    virReportError(VIR_ERR_OPERATION_FAILED,
                                   _("migration of disk %s failed: %s"),
                                   disk->dst, error);
                } else {
                    virReportError(VIR_ERR_OPERATION_FAILED,
                                   _("migration of disk %s failed"), disk->dst);
                }
                failed = true;
            }
            ATTRIBUTE_FALLTHROUGH;
        case VIR_DOMAIN_BLOCK_JOB_CANCELED:
        case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
            qemuBlockJobSyncEndDisk(vm, asyncJob, disk);
            diskPriv->migrating = false;
            break;

        default:
            active++;
        }

        if (status == VIR_DOMAIN_BLOCK_JOB_COMPLETED)
            completed++;

        VIR_FREE(error);
    }

    /* Updating completed block job drops the lock thus we have to recheck
     * block jobs for disks that reside before the disk(s) with completed
     * block job.
     */
    if (completed > 0) {
        completed = 0;
        active = 0;
        goto retry;
    }

    if (failed) {
        if (active) {
            VIR_DEBUG("Some disk mirrors failed; still waiting for %zu "
                      "disk mirrors to finish", active);
            return -1;
        } else {
            VIR_DEBUG("All disk mirrors are gone; some of them failed");
            return -2;
        }
    } else {
        if (active) {
            VIR_DEBUG("Waiting for %zu disk mirrors to finish", active);
            return 0;
        } else {
            VIR_DEBUG("All disk mirrors are gone");
            return 1;
        }
    }
}


/*
 * Returns 0 on success,
 *         1 when job is already completed or it failed and failNoJob is false,
 *         -1 on error or when job failed and failNoJob is true.
 */
static int
qemuMigrationSrcNBDCopyCancelOne(virQEMUDriverPtr driver,
                                 virDomainObjPtr vm,
                                 virDomainDiskDefPtr disk,
                                 bool failNoJob,
                                 qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    char *diskAlias = NULL;
    char *error = NULL;
    int ret = -1;
    int status;
    int rv;

    status = qemuBlockJobUpdateDisk(vm, asyncJob, disk, &error);
    switch (status) {
    case VIR_DOMAIN_BLOCK_JOB_FAILED:
    case VIR_DOMAIN_BLOCK_JOB_CANCELED:
        if (failNoJob) {
            if (error) {
                virReportError(VIR_ERR_OPERATION_FAILED,
                               _("migration of disk %s failed: %s"),
                               disk->dst, error);
            } else {
                virReportError(VIR_ERR_OPERATION_FAILED,
                               _("migration of disk %s failed"), disk->dst);
            }
            goto cleanup;
        }
        ATTRIBUTE_FALLTHROUGH;
    case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
        ret = 1;
        goto cleanup;
    }

    if (!(diskAlias = qemuAliasDiskDriveFromDisk(disk)))
        return -1;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        goto cleanup;

    rv = qemuMonitorBlockJobCancel(priv->mon, diskAlias);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    VIR_FREE(diskAlias);
    VIR_FREE(error);
    return ret;
}


/**
 * qemuMigrationSrcNBDCopyCancel:
 * @driver: qemu driver
 * @vm: domain
 * @check: if true report an error when some of the mirrors fail
 *
 * Cancel all drive-mirrors started by qemuMigrationSrcNBDStorageCopy.
 * Any pending block job events for the affected disks will be processed.
 *
 * Returns 0 on success, -1 otherwise.
 */
static int
qemuMigrationSrcNBDCopyCancel(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              bool check,
                              qemuDomainAsyncJob asyncJob,
                              virConnectPtr dconn)
{
    virErrorPtr err = NULL;
    int ret = -1;
    size_t i;
    int rv;
    bool failed = false;

    VIR_DEBUG("Cancelling drive mirrors for domain %s", vm->def->name);

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);

        if (!diskPriv->migrating)
            continue;

        rv = qemuMigrationSrcNBDCopyCancelOne(driver, vm, disk,
                                              check, asyncJob);
        if (rv != 0) {
            if (rv < 0) {
                if (!err)
                    err = virSaveLastError();
                failed = true;
            }
            qemuBlockJobSyncEndDisk(vm, asyncJob, disk);
            diskPriv->migrating = false;
        }
    }

    while ((rv = qemuMigrationSrcNBDCopyCancelled(vm, asyncJob, check)) != 1) {
        if (check && !failed &&
            dconn && virConnectIsAlive(dconn) <= 0) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("Lost connection to destination host"));
            failed = true;
        }

        if (rv < 0) {
            failed = true;
            if (rv == -2)
                break;
        }

        if (failed && !err)
            err = virSaveLastError();

        if (virDomainObjWait(vm) < 0)
            goto cleanup;
    }

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);

        if (!diskPriv->migrSource)
            continue;

        qemuBlockStorageSourceDetachOneBlockdev(driver, vm, asyncJob,
                                                diskPriv->migrSource);
        virStorageSourceFree(diskPriv->migrSource);
        diskPriv->migrSource = NULL;
    }

    ret = failed ? -1 : 0;

 cleanup:
    if (err) {
        virSetError(err);
        virFreeError(err);
    }
    return ret;
}


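/**
 * qemuMigrationSrcNBDStorageCopyBlockdev:
 * @driver: qemu driver
 * @vm: domain
 * @disk: disk to mirror
 * @diskAlias: libvirt alias of @disk
 * @host: host running the destination NBD server
 * @port: port of the destination NBD server
 * @mirror_speed: bandwidth limit in bytes/s
 * @mirror_flags: VIR_DOMAIN_BLOCK_REBASE_* flags for the mirror
 * @tlsAlias: alias of the TLS object to use for the NBD connection
 *
 * Starts a blockdev-mirror job copying @disk to the destination NBD server.
 * The NBD export is first attached as a blockdev node pair named
 * "migration-<target>-storage" / "migration-<target>-format" and the mirror
 * is started on top of that node, which is what allows the copy to use TLS.
 *
 * Returns 0 on success, -1 on error.
 */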
static int
qemuMigrationSrcNBDStorageCopyBlockdev(virQEMUDriverPtr driver,
                                       virDomainObjPtr vm,
                                       virDomainDiskDefPtr disk,
                                       const char *diskAlias,
                                       const char *host,
                                       int port,
                                       unsigned long long mirror_speed,
                                       unsigned int mirror_flags,
                                       const char *tlsAlias)
{
    qemuBlockStorageSourceAttachDataPtr data = NULL;
    qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
    virStorageSourcePtr copysrc = NULL;
    int mon_ret = 0;
    int ret = -1;

    VIR_DEBUG("starting blockdev mirror for disk=%s to host=%s", diskAlias, host);

    if (VIR_ALLOC(copysrc) < 0)
        goto cleanup;

    copysrc->type = VIR_STORAGE_TYPE_NETWORK;
    copysrc->protocol = VIR_STORAGE_NET_PROTOCOL_NBD;
    copysrc->format = VIR_STORAGE_FILE_RAW;

    if (VIR_ALLOC(copysrc->backingStore) < 0)
        goto cleanup;

    if (VIR_STRDUP(copysrc->path, diskAlias) < 0)
        goto cleanup;

    if (VIR_ALLOC_N(copysrc->hosts, 1) < 0)
        goto cleanup;

    copysrc->nhosts = 1;
    copysrc->hosts->transport = VIR_STORAGE_NET_HOST_TRANS_TCP;
    copysrc->hosts->port = port;
    if (VIR_STRDUP(copysrc->hosts->name, host) < 0)
        goto cleanup;

    if (VIR_STRDUP(copysrc->tlsAlias, tlsAlias) < 0)
        goto cleanup;

    if (virAsprintf(&copysrc->nodestorage, "migration-%s-storage", disk->dst) < 0 ||
        virAsprintf(&copysrc->nodeformat, "migration-%s-format", disk->dst) < 0)
        goto cleanup;

    if (!(data = qemuBlockStorageSourceAttachPrepareBlockdev(copysrc)))
        goto cleanup;

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
        goto cleanup;

    mon_ret = qemuBlockStorageSourceAttachApply(qemuDomainGetMonitor(vm), data);

    if (mon_ret == 0)
        mon_ret = qemuMonitorBlockdevMirror(qemuDomainGetMonitor(vm), NULL,
                                            diskAlias, copysrc->nodeformat,
                                            mirror_speed, 0, 0, mirror_flags);

    if (mon_ret != 0)
        qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), data);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || mon_ret < 0)
        goto cleanup;

    VIR_STEAL_PTR(diskPriv->migrSource, copysrc);

    ret = 0;

 cleanup:
    qemuBlockStorageSourceAttachDataFree(data);
    virStorageSourceFree(copysrc);
    return ret;
}


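/**
 * qemuMigrationSrcNBDStorageCopyDriveMirror:
 * @driver: qemu driver
 * @vm: domain
 * @diskAlias: libvirt alias of the disk to mirror
 * @host: host running the destination NBD server
 * @port: port of the destination NBD server
 * @mirror_speed: bandwidth limit in bytes/s
 * @mirror_flags: VIR_DOMAIN_BLOCK_REBASE_* flags for the mirror
 *
 * Starts a drive-mirror job copying the disk to the destination NBD server
 * using an "nbd:<host>:<port>:exportname=<alias>" destination; the host is
 * wrapped in [] when it contains a colon (i.e. an IPv6 address).
 *
 * Returns 0 on success, -1 on error.
 */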
static int
qemuMigrationSrcNBDStorageCopyDriveMirror(virQEMUDriverPtr driver,
                                          virDomainObjPtr vm,
                                          const char *diskAlias,
                                          const char *host,
                                          int port,
                                          unsigned long long mirror_speed,
                                          unsigned int mirror_flags)
{
    char *nbd_dest = NULL;
    int mon_ret;
    int ret = -1;

    if (strchr(host, ':')) {
        if (virAsprintf(&nbd_dest, "nbd:[%s]:%d:exportname=%s",
                        host, port, diskAlias) < 0)
            goto cleanup;
    } else {
        if (virAsprintf(&nbd_dest, "nbd:%s:%d:exportname=%s",
                        host, port, diskAlias) < 0)
            goto cleanup;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
        goto cleanup;

    mon_ret = qemuMonitorDriveMirror(qemuDomainGetMonitor(vm),
                                     diskAlias, nbd_dest, "raw",
                                     mirror_speed, 0, 0, mirror_flags);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || mon_ret < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    VIR_FREE(nbd_dest);
    return ret;
}


/**
 * qemuMigrationSrcNBDStorageCopy:
 * @driver: qemu driver
 * @vm: domain
 * @mig: migration cookie
 * @host: where are we migrating to
 * @speed: bandwidth limit in MiB/s
 * @migrate_flags: migrate monitor command flags
 *
 * Migrate non-shared storage using the NBD protocol to the server running
 * inside the qemu process on dst and wait until the copy converges.
 * On success update @migrate_flags so we don't tell 'migrate' command
 * to do the very same operation. On failure, the caller is
 * expected to call qemuMigrationSrcNBDCopyCancel to stop all
 * running copy operations.
 *
 * Returns 0 on success (@migrate_flags updated),
 *        -1 otherwise.
 */
static int
qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               qemuMigrationCookiePtr mig,
                               const char *host,
                               unsigned long speed,
                               unsigned int *migrate_flags,
                               size_t nmigrate_disks,
                               const char **migrate_disks,
                               virConnectPtr dconn,
                               const char *tlsAlias,
                               unsigned int flags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    int port;
    size_t i;
    char *diskAlias = NULL;
    unsigned long long mirror_speed = speed;
    unsigned int mirror_flags = VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT;
    int rv;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

    VIR_DEBUG("Starting drive mirrors for domain %s", vm->def->name);

    if (mirror_speed > LLONG_MAX >> 20) {
        virReportError(VIR_ERR_OVERFLOW,
                       _("bandwidth must be less than %llu"),
                       LLONG_MAX >> 20);
        goto cleanup;
    }
    mirror_speed <<= 20;

    /* steal NBD port and thus prevent its propagation back to destination */
    port = mig->nbd->port;
    mig->nbd->port = 0;

    if (*migrate_flags & QEMU_MONITOR_MIGRATE_NON_SHARED_INC)
        mirror_flags |= VIR_DOMAIN_BLOCK_REBASE_SHALLOW;

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        int rc;

        /* check whether disk should be migrated */
        if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
            continue;

        if (!(diskAlias = qemuAliasDiskDriveFromDisk(disk)))
            goto cleanup;

        qemuBlockJobSyncBeginDisk(disk);

        if (flags & VIR_MIGRATE_TLS) {
            rc = qemuMigrationSrcNBDStorageCopyBlockdev(driver, vm,
                                                        disk, diskAlias,
                                                        host, port,
                                                        mirror_speed,
                                                        mirror_flags,
                                                        tlsAlias);
        } else {
            rc = qemuMigrationSrcNBDStorageCopyDriveMirror(driver, vm, diskAlias,
                                                           host, port,
                                                           mirror_speed,
                                                           mirror_flags);
        }

        if (rc < 0) {
            qemuBlockJobSyncEndDisk(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, disk);
            goto cleanup;
        }

        VIR_FREE(diskAlias);
        diskPriv->migrating = true;
        diskPriv->blockjob->started = true;

        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0) {
            VIR_WARN("Failed to save status on vm %s", vm->def->name);
            goto cleanup;
        }
    }

    while ((rv = qemuMigrationSrcNBDStorageCopyReady(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) != 1) {
        if (rv < 0)
            goto cleanup;

        if (priv->job.abortJob) {
            priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
            virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                           qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                           _("canceled by client"));
            goto cleanup;
        }

        if (dconn && virConnectIsAlive(dconn) <= 0) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("Lost connection to destination host"));
            goto cleanup;
        }

        if (virDomainObjWait(vm) < 0)
            goto cleanup;
    }

    qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                     priv->job.current);

    /* Okay, all disks are ready. Modify migrate_flags */
    *migrate_flags &= ~(QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
                        QEMU_MONITOR_MIGRATE_NON_SHARED_INC);
    ret = 0;

 cleanup:
    virObjectUnref(cfg);
    VIR_FREE(diskAlias);
    return ret;
}


/**
 * qemuMigrationSrcIsAllowedHostdev:
 * @def: domain definition
 *
 * Checks that @def does not contain any host devices unsupported across
 * migrations. Returns true if the vm is allowed to migrate.
 */
static bool
qemuMigrationSrcIsAllowedHostdev(const virDomainDef *def)
{
    size_t i;

    /* Migration with USB host devices is allowed, all other devices are
     * forbidden. */
    for (i = 0; i < def->nhostdevs; i++) {
        virDomainHostdevDefPtr hostdev = def->hostdevs[i];
        if (hostdev->mode != VIR_DOMAIN_HOSTDEV_MODE_SUBSYS ||
            hostdev->source.subsys.type != VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("domain has assigned non-USB host devices"));
            return false;
        }
    }

    return true;
}


/**
 * qemuMigrationSrcIsAllowed:
 * @driver: qemu driver struct
 * @vm: domain object
 * @remote: migration is remote
 * @flags: migration flags (see struct virDomainMigrateFlags)
 *
 * Validates that the configuration of @vm can be migrated in various
 * situations. If @remote is true, the migration happens to remote host. @flags
 * is used to check various special migration types according to the request.
 *
 * Returns true if migration is supported. Reports libvirt error and returns
 * false otherwise.
 */
bool
qemuMigrationSrcIsAllowed(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          bool remote,
                          unsigned int flags)
{
    int nsnapshots;
    int pauseReason;
    size_t i;

    /* perform these checks only when migrating to remote hosts */
    if (remote) {
        nsnapshots = virDomainSnapshotObjListNum(vm->snapshots, NULL, 0);
        if (nsnapshots < 0)
            return false;

        if (nsnapshots > 0) {
            virReportError(VIR_ERR_OPERATION_INVALID,
                           _("cannot migrate domain with %d snapshots"),
                           nsnapshots);
            return false;
        }

        /* cancel migration if disk I/O error is emitted while migrating */
        if (flags & VIR_MIGRATE_ABORT_ON_ERROR &&
            !(flags & VIR_MIGRATE_OFFLINE) &&
            virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
            pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("cannot migrate domain with I/O error"));
            return false;
        }

    }

    /* following checks don't make sense for offline migration */
    if (!(flags & VIR_MIGRATE_OFFLINE)) {
        if (qemuProcessAutoDestroyActive(driver, vm)) {
            virReportError(VIR_ERR_OPERATION_INVALID,
                           "%s", _("domain is marked for auto destroy"));
            return false;
        }


        if (qemuDomainHasBlockjob(vm, false)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("domain has active block job"));
            return false;
        }

        if (!qemuMigrationSrcIsAllowedHostdev(vm->def))
            return false;

        if (vm->def->cpu) {
            /* QEMU blocks migration and save with invariant TSC enabled
             * unless TSC frequency is explicitly set.
             */
            if (virCPUCheckFeature(vm->def->os.arch, vm->def->cpu,
                                   "invtsc") == 1) {
                bool block = true;

                for (i = 0; i < vm->def->clock.ntimers; i++) {
                    virDomainTimerDefPtr timer = vm->def->clock.timers[i];

                    if (timer->name == VIR_DOMAIN_TIMER_NAME_TSC &&
                        timer->frequency > 0) {
                        block = false;
                        break;
                    }
                }

                if (block) {
                    virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                                   _("domain has 'invtsc' CPU feature but "
                                     "TSC frequency is not specified"));
                    return false;
                }
            }
        }

        /* Verify that memory device config can be transferred reliably */
        for (i = 0; i < vm->def->nmems; i++) {
            virDomainMemoryDefPtr mem = vm->def->mems[i];

            if (mem->model == VIR_DOMAIN_MEMORY_MODEL_DIMM &&
                mem->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_DIMM) {
                virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                               _("domain's dimm info lacks slot ID "
                                 "or base address"));

                return false;
            }
        }

        if (vm->def->nshmems) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("migration with shmem device is not supported"));
            return false;
        }
    }

    return true;
}

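/**
 * qemuMigrationSrcIsSafe:
 * @def: domain definition
 * @nmigrate_disks: number of entries in @migrate_disks
 * @migrate_disks: optional list of disk targets selected for storage migration
 * @flags: migration flags
 *
 * Checks that migrating the disks of @def does not risk image corruption:
 * every writable disk must either be covered by storage migration, live on
 * shared or cluster storage (or RBD), or be used with cache=none or
 * cache=directsync.
 *
 * Returns true if the migration is considered safe, false otherwise (with
 * an error reported).
 */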
static bool
qemuMigrationSrcIsSafe(virDomainDefPtr def,
                       size_t nmigrate_disks,
                       const char **migrate_disks,
                       unsigned int flags)

{
    bool storagemigration = flags & (VIR_MIGRATE_NON_SHARED_DISK |
                                     VIR_MIGRATE_NON_SHARED_INC);
    size_t i;
    int rc;

    for (i = 0; i < def->ndisks; i++) {
        virDomainDiskDefPtr disk = def->disks[i];
        const char *src = virDomainDiskGetSource(disk);

        /* Disks without any source (i.e. floppies and CD-ROMs)
         * OR readonly are safe. */
        if (virStorageSourceIsEmpty(disk->src) ||
            disk->src->readonly)
            continue;

        /* Disks which are migrated by qemu are safe too. */
        if (storagemigration &&
            qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
            continue;

        /* However, disks on local FS (e.g. ext4) are not safe. */
        if (virDomainDiskGetType(disk) == VIR_STORAGE_TYPE_FILE) {
            if ((rc = virFileIsSharedFS(src)) < 0) {
                return false;
            } else if (rc == 0) {
                virReportError(VIR_ERR_MIGRATE_UNSAFE, "%s",
                               _("Migration without shared storage is unsafe"));
                return false;
            }
            if ((rc = virStorageFileIsClusterFS(src)) < 0)
                return false;
            else if (rc == 1)
                continue;
        } else if (disk->src->type == VIR_STORAGE_TYPE_NETWORK &&
                   disk->src->protocol == VIR_STORAGE_NET_PROTOCOL_RBD) {
            continue;
        }

        /* Our code elsewhere guarantees shared disks are either readonly (in
         * which case cache mode doesn't matter) or used with cache=none or used with cache=directsync */
        if (disk->src->shared ||
            disk->cachemode == VIR_DOMAIN_DISK_CACHE_DISABLE ||
            disk->cachemode == VIR_DOMAIN_DISK_CACHE_DIRECTSYNC)
            continue;

        virReportError(VIR_ERR_MIGRATE_UNSAFE, "%s",
                       _("Migration may lead to data corruption if disks"
                         " use cache != none or cache != directsync"));
        return false;
    }

    return true;
}

/** qemuMigrationSrcSetOffline
 * Pause domain for non-live migration.
 */
int
qemuMigrationSrcSetOffline(virQEMUDriverPtr driver,
                           virDomainObjPtr vm)
{
    int ret;
    VIR_DEBUG("driver=%p vm=%p", driver, vm);
    ret = qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION,
                              QEMU_ASYNC_JOB_MIGRATION_OUT);
    if (ret == 0) {
        virObjectEventPtr event;

        event = virDomainEventLifecycleNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_SUSPENDED,
                                         VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED);
        virObjectEventStateQueue(driver->domainEventState, event);
    }

    return ret;
}


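/**
 * qemuMigrationAnyPostcopyFailed:
 * @driver: qemu driver
 * @vm: domain
 *
 * Handles a migration that failed while in post-copy mode.  Such a domain
 * cannot simply be resumed, so it is left paused with the
 * VIR_DOMAIN_PAUSED_POSTCOPY_FAILED reason; if it is still running, its
 * CPUs are stopped first and a suspended lifecycle event is emitted.
 */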
void
qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver,
                               virDomainObjPtr vm)
{
    virDomainState state;
    int reason;

    state = virDomainObjGetState(vm, &reason);

    if (state != VIR_DOMAIN_PAUSED &&
        state != VIR_DOMAIN_RUNNING)
        return;

    if (state == VIR_DOMAIN_PAUSED &&
        reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED)
        return;

    VIR_WARN("Migration of domain %s failed during post-copy; "
             "leaving the domain paused", vm->def->name);

    if (state == VIR_DOMAIN_RUNNING) {
        virObjectEventPtr event;

        if (qemuProcessStopCPUs(driver, vm,
                                VIR_DOMAIN_PAUSED_POSTCOPY_FAILED,
                                QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
            VIR_WARN("Unable to pause guest CPUs for %s", vm->def->name);
            return;
        }

        event = virDomainEventLifecycleNewFromObj(vm,
                                VIR_DOMAIN_EVENT_SUSPENDED,
                                VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY_FAILED);
        virObjectEventStateQueue(driver->domainEventState, event);
    } else {
        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
                             VIR_DOMAIN_PAUSED_POSTCOPY_FAILED);
    }
}


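/**
 * qemuMigrationSrcWaitForSpice:
 * @vm: domain
 *
 * Waits for a seamless SPICE migration to finish.  This is a no-op unless
 * QEMU supports seamless SPICE migration and the job actually started one.
 *
 * Returns 0 on success (or when there is nothing to wait for), -1 when
 * waiting on the domain condition fails.
 */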
static int
qemuMigrationSrcWaitForSpice(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_SEAMLESS_MIGRATION) ||
        !priv->job.spiceMigration)
        return 0;

    VIR_DEBUG("Waiting for SPICE to finish migration");
    while (!priv->job.spiceMigrated && !priv->job.abortJob) {
        if (virDomainObjWait(vm) < 0)
            return -1;
    }
    return 0;
}


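/* Map the migration status reported by QEMU onto the libvirt job status
 * kept in @jobInfo; statuses with no direct mapping (setup, active,
 * cancelling) leave the job status untouched. */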
static void
qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo)
{
    switch ((qemuMonitorMigrationStatus) jobInfo->stats.mig.status) {
    case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_POSTCOPY;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_NONE;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_PAUSED;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_SETUP:
    case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
    case QEMU_MONITOR_MIGRATION_STATUS_CANCELLING:
    case QEMU_MONITOR_MIGRATION_STATUS_LAST:
        break;
    }
}


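/**
 * qemuMigrationAnyFetchStats:
 * @driver: qemu driver
 * @vm: domain
 * @asyncJob: async job to enter the monitor with
 * @jobInfo: job info structure updated with the fetched statistics
 * @error: optionally filled with the error string reported by QEMU
 *
 * Queries the monitor for the current migration statistics and stores them
 * in @jobInfo->stats.mig.
 *
 * Returns 0 on success, -1 on failure to talk to the monitor.
 */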
int
qemuMigrationAnyFetchStats(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           qemuDomainAsyncJob asyncJob,
                           qemuDomainJobInfoPtr jobInfo,
                           char **error)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuMonitorMigrationStats stats;
    int rv;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    rv = qemuMonitorGetMigrationStats(priv->mon, &stats, error);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0)
        return -1;

    jobInfo->stats.mig = stats;

    return 0;
}


static const char *
qemuMigrationJobName(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    switch (priv->job.asyncJob) {
    case QEMU_ASYNC_JOB_MIGRATION_OUT:
        return _("migration out job");
    case QEMU_ASYNC_JOB_SAVE:
        return _("domain save job");
    case QEMU_ASYNC_JOB_DUMP:
        return _("domain core dump job");
    case QEMU_ASYNC_JOB_NONE:
        return _("undefined");
    case QEMU_ASYNC_JOB_MIGRATION_IN:
        return _("migration in job");
    case QEMU_ASYNC_JOB_SNAPSHOT:
        return _("snapshot job");
    case QEMU_ASYNC_JOB_START:
        return _("start job");
    case QEMU_ASYNC_JOB_LAST:
    default:
        return _("job");
    }
}


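/* Check the status of the migration job and report an appropriate libvirt
 * error when it is no longer active, has failed, or was cancelled.
 * Statistics are re-fetched from QEMU when migration events are not
 * available or when QEMU reported an error.
 * Returns 0 if the job is still healthy, -1 otherwise. */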
static int
qemuMigrationJobCheckStatus(virQEMUDriverPtr driver,
                            virDomainObjPtr vm,
                            qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = priv->job.current;
    char *error = NULL;
    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
    int ret = -1;

    if (!events ||
        jobInfo->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
        if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, &error) < 0)
            return -1;
    }

    qemuMigrationUpdateJobType(jobInfo);

    switch (jobInfo->status) {
    case QEMU_DOMAIN_JOB_STATUS_NONE:
        virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
                       qemuMigrationJobName(vm), _("is not active"));
        goto cleanup;

    case QEMU_DOMAIN_JOB_STATUS_FAILED:
        virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
                       qemuMigrationJobName(vm),
                       error ? error : _("unexpectedly failed"));
        goto cleanup;

    case QEMU_DOMAIN_JOB_STATUS_CANCELED:
        virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                       qemuMigrationJobName(vm), _("canceled by client"));
        goto cleanup;

    case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
    case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
    case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
    case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
    case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
    case QEMU_DOMAIN_JOB_STATUS_PAUSED:
        break;
    }

    ret = 0;

 cleanup:
    VIR_FREE(error);
    return ret;
}


enum qemuMigrationCompletedFlags {
    QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR = (1 << 0),
    /* This flag should only be set when run on src host */
    QEMU_MIGRATION_COMPLETED_CHECK_STORAGE  = (1 << 1),
    QEMU_MIGRATION_COMPLETED_POSTCOPY       = (1 << 2),
    QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER = (1 << 3),
};


/**
 * Returns 1 if migration completed successfully,
 *         0 if the domain is still being migrated,
 *         -1 migration failed,
 *         -2 something else failed, we need to cancel migration.
 */
static int
qemuMigrationAnyCompleted(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          qemuDomainAsyncJob asyncJob,
                          virConnectPtr dconn,
                          unsigned int flags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = priv->job.current;
    int pauseReason;

    if (qemuMigrationJobCheckStatus(driver, vm, asyncJob) < 0)
        goto error;

    /* This flag should only be set when run on src host */
    if (flags & QEMU_MIGRATION_COMPLETED_CHECK_STORAGE &&
        qemuMigrationSrcNBDStorageCopyReady(vm, asyncJob) < 0)
        goto error;

    if (flags & QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR &&
        virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
        pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
        virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
                       qemuMigrationJobName(vm), _("failed due to I/O error"));
        goto error;
    }

    if (dconn && virConnectIsAlive(dconn) <= 0) {
        virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                       _("Lost connection to destination host"));
        goto error;
    }

    /* Migration was paused before serializing device state, let's return to
     * the caller so that it can finish all block jobs, resume migration, and
     * wait again for the real end of the migration.
     */
    if (flags & QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER &&
        jobInfo->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) {
        VIR_DEBUG("Migration paused before switchover");
        return 1;
    }

    /* In case of postcopy the source considers migration completed at the
     * moment it switched from active to postcopy-active state. The destination
     * will continue waiting until the migrate state changes to completed.
     */
    if (flags & QEMU_MIGRATION_COMPLETED_POSTCOPY &&
        jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) {
        VIR_DEBUG("Migration switched to post-copy");
        return 1;
    }

    if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED)
        return 1;
    else
        return 0;

 error:
    switch (jobInfo->status) {
    case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
    case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
    case QEMU_DOMAIN_JOB_STATUS_PAUSED:
        /* The migration was aborted by us rather than QEMU itself. */
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
        return -2;

    case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
        /* Something failed after QEMU already finished the migration. */
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
        return -1;

    case QEMU_DOMAIN_JOB_STATUS_FAILED:
    case QEMU_DOMAIN_JOB_STATUS_CANCELED:
        /* QEMU aborted the migration. */
        return -1;

    case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
    case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
    case QEMU_DOMAIN_JOB_STATUS_NONE:
        /* Impossible. */
        break;
    }

    return -1;
}


1564 1565 1566
/* Returns 0 on success, -2 when migration needs to be cancelled, or -1 when
 * QEMU reports failed migration.
 */
static int
qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  qemuDomainAsyncJob asyncJob,
                                  virConnectPtr dconn,
                                  unsigned int flags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = priv->job.current;
    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
    int rv;

    jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING;

    while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
                                           dconn, flags)) != 1) {
        if (rv < 0)
            return rv;

        if (events) {
            if (virDomainObjWait(vm) < 0) {
                if (virDomainObjIsActive(vm))
                    jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
                return -2;
            }
        } else {
            /* Poll every 50ms for progress & to allow cancellation */
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };

            virObjectUnlock(vm);
            nanosleep(&ts, NULL);
            virObjectLock(vm);
        }
    }

    if (events)
        ignore_value(qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, NULL));

    qemuDomainJobInfoUpdateTime(jobInfo);
    qemuDomainJobInfoUpdateDowntime(jobInfo);
    VIR_FREE(priv->job.completed);
    if (VIR_ALLOC(priv->job.completed) == 0) {
        *priv->job.completed = *jobInfo;
        priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
    }

    if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT &&
        jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED)
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;

    return 0;
}


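/* Wait on the destination host until the incoming migration is finished.
 * When @postcopy is true, switching to post-copy mode also counts as
 * completion.  Requires QEMU migration events; without them the function
 * returns 0 immediately.  Returns 0 on success, -1 on failure.
 */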
static int
qemuMigrationDstWaitForCompletion(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  qemuDomainAsyncJob asyncJob,
                                  bool postcopy)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned int flags = 0;
    int rv;

    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT))
        return 0;

    VIR_DEBUG("Waiting for incoming migration to complete");

    if (postcopy)
        flags = QEMU_MIGRATION_COMPLETED_POSTCOPY;

    while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
                                           NULL, flags)) != 1) {
        if (rv < 0 || virDomainObjWait(vm) < 0)
            return -1;
    }

    return 0;
}


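/* Tell the SPICE server of the source QEMU where the destination graphics
 * server listens (address, port, tlsPort, tlsSubject) so that connected
 * clients can be migrated seamlessly.  The values come from the migration
 * cookie and may be overridden by @graphicsuri; non-SPICE graphics are
 * silently skipped.
 */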
static int
qemuMigrationSrcGraphicsRelocate(virQEMUDriverPtr driver,
                                 virDomainObjPtr vm,
                                 qemuMigrationCookiePtr cookie,
                                 const char *graphicsuri)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    const char *listenAddress = NULL;
    virSocketAddr addr;
    virURIPtr uri = NULL;
    int type = -1;
    int port = -1;
    int tlsPort = -1;
    const char *tlsSubject = NULL;

    if (!cookie || (!cookie->graphics && !graphicsuri))
        return 0;

    if (graphicsuri && !(uri = virURIParse(graphicsuri)))
        goto cleanup;

    if (cookie->graphics) {
        type = cookie->graphics->type;

        listenAddress = cookie->graphics->listen;

        if (!listenAddress ||
            (virSocketAddrParse(&addr, listenAddress, AF_UNSPEC) > 0 &&
             virSocketAddrIsWildcard(&addr)))
            listenAddress = cookie->remoteHostname;

        port = cookie->graphics->port;
        tlsPort = cookie->graphics->tlsPort;
        tlsSubject = cookie->graphics->tlsSubject;
    }

    if (uri) {
        size_t i;

        if ((type = virDomainGraphicsTypeFromString(uri->scheme)) < 0) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("unknown graphics type %s"), uri->scheme);
            goto cleanup;
        }

        if (uri->server)
            listenAddress = uri->server;
        if (uri->port > 0)
            port = uri->port;

        for (i = 0; i < uri->paramsCount; i++) {
            virURIParamPtr param = uri->params + i;

            if (STRCASEEQ(param->name, "tlsPort")) {
                if (virStrToLong_i(param->value, NULL, 10, &tlsPort) < 0) {
                    virReportError(VIR_ERR_INVALID_ARG,
                                   _("invalid tlsPort number: %s"),
                                   param->value);
                    goto cleanup;
                }
            } else if (STRCASEEQ(param->name, "tlsSubject")) {
                tlsSubject = param->value;
            }
        }
    }

    /* QEMU doesn't support VNC relocation yet, so
     * skip it to avoid generating an error
     */
    if (type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
        ret = 0;
        goto cleanup;
    }

    /* Older libvirt sends port == 0 for listen type='none' graphics. It's
     * safe to ignore such requests since relocation to unknown port does
     * not make sense in general.
     */
    if (port <= 0 && tlsPort <= 0) {
        ret = 0;
        goto cleanup;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
        ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress,
                                          port, tlsPort, tlsSubject);
        priv->job.spiceMigration = !ret;
        if (qemuDomainObjExitMonitor(driver, vm) < 0)
            ret = -1;
    }

 cleanup:
    virURIFree(uri);
    return ret;
}


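/* Re-apply Open vSwitch per-port data from the migration cookie on the
 * destination host; interfaces using other virtualport types are left
 * untouched.
 */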
static int
qemuMigrationDstOPDRelocate(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
                            virDomainObjPtr vm,
                            qemuMigrationCookiePtr cookie)
{
    virDomainNetDefPtr netptr;
    int ret = -1;
    size_t i;

    for (i = 0; i < cookie->network->nnets; i++) {
        netptr = vm->def->nets[i];

        switch (cookie->network->net[i].vporttype) {
        case VIR_NETDEV_VPORT_PROFILE_NONE:
        case VIR_NETDEV_VPORT_PROFILE_8021QBG:
        case VIR_NETDEV_VPORT_PROFILE_8021QBH:
           break;
        case VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH:
            if (virNetDevOpenvswitchSetMigrateData(cookie->network->net[i].portdata,
                                                   netptr->ifname) != 0) {
                virReportError(VIR_ERR_INTERNAL_ERROR,
                               _("Unable to run command to set OVS port data for "
                                 "interface %s"), netptr->ifname);
                goto cleanup;
            }
            break;
        default:
            break;
        }
    }

    ret = 0;
 cleanup:
    return ret;
}


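/* Check that the incoming migration protocol encoded in @migrateFrom
 * (rdma, tcp, exec, fd, unix, or stdio) is supported by this QEMU binary.
 */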
int
qemuMigrationDstCheckProtocol(virQEMUCapsPtr qemuCaps,
                              const char *migrateFrom)
{
    if (STRPREFIX(migrateFrom, "rdma")) {
        if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_MIGRATE_RDMA)) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("incoming RDMA migration is not supported "
                             "with this QEMU binary"));
            return -1;
        }
    } else if (!STRPREFIX(migrateFrom, "tcp") &&
               !STRPREFIX(migrateFrom, "exec") &&
               !STRPREFIX(migrateFrom, "fd") &&
               !STRPREFIX(migrateFrom, "unix") &&
               STRNEQ(migrateFrom, "stdio")) {
        virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                       _("unknown migration protocol"));
        return -1;
    }

    return 0;
}


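/* Build the URI describing the incoming migration source: "stdio" is
 * translated into an fd: URI using @migrateFd, any other value is
 * returned unchanged.
 */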
char *
qemuMigrationDstGetURI(const char *migrateFrom,
                       int migrateFd)
{
    char *uri = NULL;

    if (STREQ(migrateFrom, "stdio"))
        ignore_value(virAsprintf(&uri, "fd:%d", migrateFd));
    else
        ignore_value(VIR_STRDUP(uri, migrateFrom));

    return uri;
}


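/* Tell QEMU to start accepting the incoming migration on @uri via the
 * migrate-incoming monitor command.  For QEMU_ASYNC_JOB_MIGRATION_IN the
 * wait for completion is left to the Finish phase; for any other async
 * job this function waits for the migration to finish itself.
 */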
int
qemuMigrationDstRun(virQEMUDriverPtr driver,
                    virDomainObjPtr vm,
                    const char *uri,
                    qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    int rv;

    VIR_DEBUG("Setting up incoming migration with URI %s", uri);

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    rv = qemuMonitorMigrateIncoming(priv->mon, uri);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0)
        goto cleanup;

    if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
        /* qemuMigrationDstWaitForCompletion is called from the Finish phase */
        ret = 0;
        goto cleanup;
    }

    if (qemuMigrationDstWaitForCompletion(driver, vm, asyncJob, false) < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    return ret;
}


/* This is called for outgoing non-p2p migrations when a connection to the
 * client which initiated the migration was closed but we were waiting for it
 * to follow up with the next phase, that is, in between
 * qemuDomainMigrateBegin3 and qemuDomainMigratePerform3 or
 * qemuDomainMigratePerform3 and qemuDomainMigrateConfirm3.
 */
static void
qemuMigrationSrcCleanup(virDomainObjPtr vm,
                        virConnectPtr conn,
                        void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s",
              vm->def->name, conn,
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
              qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
                                              priv->job.phase));

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
        return;

    VIR_DEBUG("The connection which started outgoing migration of domain %s"
              " was closed; canceling the migration",
              vm->def->name);

    switch ((qemuMigrationJobPhase) priv->job.phase) {
    case QEMU_MIGRATION_PHASE_BEGIN3:
        /* just forget we were about to migrate */
        qemuDomainObjDiscardAsyncJob(driver, vm);
        break;

    case QEMU_MIGRATION_PHASE_PERFORM3_DONE:
        VIR_WARN("Migration of domain %s finished but we don't know if the"
                 " domain was successfully started on destination or not",
                 vm->def->name);
        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 priv->job.migParams, priv->job.apiFlags);
        /* clear the job and let higher levels decide what to do */
        qemuDomainObjDiscardAsyncJob(driver, vm);
        break;

    case QEMU_MIGRATION_PHASE_PERFORM3:
        /* cannot be seen without an active migration API; unreachable */
    case QEMU_MIGRATION_PHASE_CONFIRM3:
    case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
        /* all done; unreachable */
    case QEMU_MIGRATION_PHASE_PREPARE:
    case QEMU_MIGRATION_PHASE_FINISH2:
    case QEMU_MIGRATION_PHASE_FINISH3:
        /* incoming migration; unreachable */
    case QEMU_MIGRATION_PHASE_PERFORM2:
        /* single phase outgoing migration; unreachable */
    case QEMU_MIGRATION_PHASE_NONE:
    case QEMU_MIGRATION_PHASE_LAST:
        /* unreachable */
        ;
    }
}


/* The caller is supposed to lock the vm and start a migration job. */
static char *
qemuMigrationSrcBeginPhase(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           const char *xmlin,
                           const char *dname,
                           char **cookieout,
                           int *cookieoutlen,
                           size_t nmigrate_disks,
                           const char **migrate_disks,
                           unsigned long flags)
{
    char *rv = NULL;
    qemuMigrationCookiePtr mig = NULL;
    virDomainDefPtr def = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virCapsPtr caps = NULL;
    unsigned int cookieFlags = QEMU_MIGRATION_COOKIE_LOCKSTATE;

    VIR_DEBUG("driver=%p, vm=%p, xmlin=%s, dname=%s,"
              " cookieout=%p, cookieoutlen=%p,"
              " nmigrate_disks=%zu, migrate_disks=%p, flags=0x%lx",
              driver, vm, NULLSTR(xmlin), NULLSTR(dname),
              cookieout, cookieoutlen, nmigrate_disks,
              migrate_disks, flags);

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    /* Only set the phase if we are inside QEMU_ASYNC_JOB_MIGRATION_OUT.
     * Otherwise we will start the async job later in the perform phase losing
     * change protection.
     */
    if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
        qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_BEGIN3);

    if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
        goto cleanup;

    if (!(flags & (VIR_MIGRATE_UNSAFE | VIR_MIGRATE_OFFLINE)) &&
        !qemuMigrationSrcIsSafe(vm->def, nmigrate_disks, migrate_disks, flags))
        goto cleanup;
    if (flags & VIR_MIGRATE_POSTCOPY &&
        (!(flags & VIR_MIGRATE_LIVE) ||
         flags & VIR_MIGRATE_PAUSED)) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy migration is not supported with non-live "
                         "or paused migration"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_POSTCOPY && flags & VIR_MIGRATE_TUNNELLED) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy is not supported with tunnelled migration"));
        goto cleanup;
    }

    if (flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC)) {
        bool has_drive_mirror =  virQEMUCapsGet(priv->qemuCaps,
                                                QEMU_CAPS_DRIVE_MIRROR);

        if (nmigrate_disks) {
            if (has_drive_mirror) {
                size_t i, j;
                /* Check user requested only known disk targets. */
                for (i = 0; i < nmigrate_disks; i++) {
                    for (j = 0; j < vm->def->ndisks; j++) {
                        if (STREQ(vm->def->disks[j]->dst, migrate_disks[i]))
                            break;
                    }

                    if (j == vm->def->ndisks) {
                        virReportError(VIR_ERR_INVALID_ARG,
                                       _("disk target %s not found"),
                                       migrate_disks[i]);
                        goto cleanup;
                    }
                }

                if (flags & VIR_MIGRATE_TUNNELLED) {
                    virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                                   _("Selecting disks to migrate is not "
                                     "implemented for tunnelled migration"));
                    goto cleanup;
                }
            } else {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("qemu does not support drive-mirror command"));
                goto cleanup;
            }
        }

        if (has_drive_mirror) {
            /* TODO support NBD for TUNNELLED migration */
            if (flags & VIR_MIGRATE_TUNNELLED) {
                VIR_WARN("NBD in tunnelled migration is currently not supported");
            } else {
                cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
                priv->nbdPort = 0;
            }
        }
    }

    if (virDomainDefHasMemoryHotplug(vm->def) ||
        ((flags & VIR_MIGRATE_PERSIST_DEST) &&
         vm->newDef && virDomainDefHasMemoryHotplug(vm->newDef)))
        cookieFlags |= QEMU_MIGRATION_COOKIE_MEMORY_HOTPLUG;

    if (!qemuDomainVcpuHotplugIsInOrder(vm->def) ||
        ((flags & VIR_MIGRATE_PERSIST_DEST) &&
         vm->newDef && !qemuDomainVcpuHotplugIsInOrder(vm->newDef)))
        cookieFlags |= QEMU_MIGRATION_COOKIE_CPU_HOTPLUG;

    if (priv->origCPU)
        cookieFlags |= QEMU_MIGRATION_COOKIE_CPU;

    cookieFlags |= QEMU_MIGRATION_COOKIE_ALLOW_REBOOT;

    if (!(flags & VIR_MIGRATE_OFFLINE))
        cookieFlags |= QEMU_MIGRATION_COOKIE_CAPS;

    if (!(mig = qemuMigrationEatCookie(driver, vm->def,
                                       priv->origname, priv, NULL, 0, 0)))
        goto cleanup;

    if (qemuMigrationBakeCookie(mig, driver, vm,
                                QEMU_MIGRATION_SOURCE,
                                cookieout, cookieoutlen,
                                cookieFlags) < 0)
        goto cleanup;

    if (flags & VIR_MIGRATE_OFFLINE) {
        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
                     VIR_MIGRATE_NON_SHARED_INC)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration cannot handle "
                             "non-shared storage"));
            goto cleanup;
        }
        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration must be specified with "
                             "the persistent flag set"));
            goto cleanup;
        }
        if (flags & VIR_MIGRATE_TUNNELLED) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("tunnelled offline migration does not "
                             "make sense"));
            goto cleanup;
        }
    }

    if (xmlin) {
        if (!(def = virDomainDefParseString(xmlin, caps, driver->xmlopt, priv->qemuCaps,
                                            VIR_DOMAIN_DEF_PARSE_INACTIVE |
                                            VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE)))
            goto cleanup;

        if (!qemuDomainCheckABIStability(driver, vm, def))
            goto cleanup;

        rv = qemuDomainDefFormatLive(driver, def, NULL, false, true);
    } else {
        rv = qemuDomainDefFormatLive(driver, vm->def, priv->origCPU,
                                     false, true);
    }

 cleanup:
    qemuMigrationCookieFree(mig);
    virObjectUnref(caps);
    virDomainDefFree(def);
    return rv;
}

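/* Begin phase of migration on the source host: grabs the migration job
 * when VIR_MIGRATE_CHANGE_PROTECTION is requested (a plain modify job
 * otherwise) and returns the domain XML to send to the destination
 * together with the outgoing migration cookie.
 */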
char *
qemuMigrationSrcBegin(virConnectPtr conn,
                      virDomainObjPtr vm,
                      const char *xmlin,
                      const char *dname,
                      char **cookieout,
                      int *cookieoutlen,
                      size_t nmigrate_disks,
                      const char **migrate_disks,
                      unsigned long flags)
{
    virQEMUDriverPtr driver = conn->privateData;
    char *xml = NULL;
    qemuDomainAsyncJob asyncJob;

    if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                  flags) < 0)
            goto cleanup;
        asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT;
    } else {
        if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
            goto cleanup;
        asyncJob = QEMU_ASYNC_JOB_NONE;
    }

    qemuMigrationSrcStoreDomainState(vm);

    if (!(flags & VIR_MIGRATE_OFFLINE) && virDomainObjCheckActive(vm) < 0)
        goto endjob;

    /* Check if there is any ejected media.
     * We don't want to require them on the destination.
     */
    if (!(flags & VIR_MIGRATE_OFFLINE) &&
        qemuProcessRefreshDisks(driver, vm, asyncJob) < 0)
        goto endjob;

    if (!(xml = qemuMigrationSrcBeginPhase(driver, vm, xmlin, dname,
                                           cookieout, cookieoutlen,
                                           nmigrate_disks, migrate_disks, flags)))
        goto endjob;

    if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        /* We keep the job active across API calls until the confirm() call.
         * This prevents any other APIs being invoked while migration is taking
         * place.
         */
        if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
                                 qemuMigrationSrcCleanup) < 0) {
            VIR_FREE(xml);
            goto endjob;
        }
        qemuMigrationJobContinue(vm);
    } else {
        goto endjob;
    }

 cleanup:
    virDomainObjEndAPI(&vm);
    return xml;

 endjob:
    if (flags & VIR_MIGRATE_CHANGE_PROTECTION)
        qemuMigrationJobFinish(driver, vm);
    else
        qemuDomainObjEndJob(driver, vm);
    goto cleanup;
}


/* Prepare is the first step, and it runs on the destination host.
 */

static void
qemuMigrationDstPrepareCleanup(virQEMUDriverPtr driver,
                               virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    VIR_DEBUG("driver=%p, vm=%s, job=%s, asyncJob=%s",
              driver,
              vm->def->name,
              qemuDomainJobTypeToString(priv->job.active),
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob));

    virPortAllocatorRelease(priv->migrationPort);
    priv->migrationPort = 0;

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN))
        return;
    qemuDomainObjDiscardAsyncJob(driver, vm);
}

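/* Build the qemuProcessIncomingDef describing how the destination QEMU
 * receives the migration stream: "stdio" for tunnelled migration,
 * otherwise protocol:host:port with IPv6 addresses enclosed in brackets.
 */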
static qemuProcessIncomingDefPtr
qemuMigrationDstPrepare(virDomainObjPtr vm,
                        bool tunnel,
                        const char *protocol,
                        const char *listenAddress,
                        unsigned short port,
                        int fd)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuProcessIncomingDefPtr inc = NULL;
    char *migrateFrom = NULL;

    if (tunnel) {
        if (VIR_STRDUP(migrateFrom, "stdio") < 0)
            goto cleanup;
    } else {
        bool encloseAddress = false;
        bool hostIPv6Capable = false;
        struct addrinfo *info = NULL;
        struct addrinfo hints = { .ai_flags = AI_ADDRCONFIG,
                                  .ai_socktype = SOCK_STREAM };
        const char *incFormat;

        if (getaddrinfo("::", NULL, &hints, &info) == 0) {
            freeaddrinfo(info);
            hostIPv6Capable = true;
        }

        if (listenAddress) {
            if (virSocketAddrNumericFamily(listenAddress) == AF_INET6) {
                if (!hostIPv6Capable) {
                    virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                                   _("host isn't capable of IPv6"));
                    goto cleanup;
                }
                /* IPv6 address must be escaped in brackets on the cmd line */
                encloseAddress = true;
            } else {
                /* listenAddress is a hostname or IPv4 */
            }
        } else if (hostIPv6Capable) {
            /* Listen on :: instead of 0.0.0.0 if QEMU understands it
             * and there is at least one IPv6 address configured
             */
            listenAddress = "::";
            encloseAddress = true;
        } else {
            listenAddress = "0.0.0.0";
        }

        /* QEMU will be started with
         *   -incoming protocol:[<IPv6 addr>]:port,
         *   -incoming protocol:<IPv4 addr>:port, or
         *   -incoming protocol:<hostname>:port
         */
        if (encloseAddress)
            incFormat = "%s:[%s]:%d";
        else
            incFormat = "%s:%s:%d";
        if (virAsprintf(&migrateFrom, incFormat,
                        protocol, listenAddress, port) < 0)
            goto cleanup;
    }

    inc = qemuProcessIncomingDefNew(priv->qemuCaps, listenAddress,
                                    migrateFrom, fd, NULL);

 cleanup:
    VIR_FREE(migrateFrom);
    return inc;
}

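/* Common implementation of the Prepare phase on the destination host:
 * parses the incoming cookie, lets the migration hook filter the domain
 * XML, starts a paused QEMU with incoming migration deferred, applies the
 * migration parameters (including TLS) and optionally starts the NBD
 * server for non-shared storage migration.  The migration job is kept
 * active for the Finish phase.
 */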
static int
qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
                           virConnectPtr dconn,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           virDomainDefPtr *def,
                           const char *origname,
                           virStreamPtr st,
                           const char *protocol,
                           unsigned short port,
                           bool autoPort,
                           const char *listenAddress,
                           size_t nmigrate_disks,
                           const char **migrate_disks,
                           int nbdPort,
                           qemuMigrationParamsPtr migParams,
                           unsigned long flags)
{
    virDomainObjPtr vm = NULL;
    virObjectEventPtr event = NULL;
    virErrorPtr origErr;
    int ret = -1;
    int dataFD[2] = { -1, -1 };
    qemuDomainObjPrivatePtr priv = NULL;
    qemuMigrationCookiePtr mig = NULL;
    bool tunnel = !!st;
    char *xmlout = NULL;
    unsigned int cookieFlags;
    unsigned int startFlags;
    virCapsPtr caps = NULL;
    qemuProcessIncomingDefPtr incoming = NULL;
    bool taint_hook = false;
    bool stopProcess = false;
    bool relabel = false;
    int rv;
    char *tlsAlias = NULL;

    virNWFilterReadLockFilterUpdates();

    if (flags & VIR_MIGRATE_OFFLINE) {
        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
                     VIR_MIGRATE_NON_SHARED_INC)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration cannot handle "
                             "non-shared storage"));
            goto cleanup;
        }
        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration must be specified with "
                             "the persistent flag set"));
            goto cleanup;
        }
        if (tunnel) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("tunnelled offline migration does not "
                             "make sense"));
            goto cleanup;
        }
        cookieFlags = 0;
    } else {
        cookieFlags = QEMU_MIGRATION_COOKIE_GRAPHICS |
                      QEMU_MIGRATION_COOKIE_CAPS;
    }

    if (flags & VIR_MIGRATE_POSTCOPY &&
        (!(flags & VIR_MIGRATE_LIVE) ||
         flags & VIR_MIGRATE_PAUSED)) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy migration is not supported with non-live "
                         "or paused migration"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_POSTCOPY && flags & VIR_MIGRATE_TUNNELLED) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy is not supported with tunnelled migration"));
        goto cleanup;
    }

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    if (!qemuMigrationSrcIsAllowedHostdev(*def))
        goto cleanup;

    /* Let migration hook filter domain XML */
    if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
        char *xml;
        int hookret;

        if (!(xml = qemuDomainDefFormatXML(driver, *def,
                                           VIR_DOMAIN_XML_SECURE |
                                           VIR_DOMAIN_XML_MIGRATABLE)))
            goto cleanup;

        hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, (*def)->name,
                              VIR_HOOK_QEMU_OP_MIGRATE, VIR_HOOK_SUBOP_BEGIN,
                              NULL, xml, &xmlout);
        VIR_FREE(xml);

        if (hookret < 0) {
            goto cleanup;
        } else if (hookret == 0) {
            if (virStringIsEmpty(xmlout)) {
                VIR_DEBUG("Migrate hook filter returned nothing; using the"
                          " original XML");
            } else {
                virDomainDefPtr newdef;

                VIR_DEBUG("Using hook-filtered domain XML: %s", xmlout);
                newdef = virDomainDefParseString(xmlout, caps, driver->xmlopt, NULL,
                                                 VIR_DOMAIN_DEF_PARSE_INACTIVE |
                                                 VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE);
                if (!newdef)
                    goto cleanup;

                if (!qemuDomainDefCheckABIStability(driver, *def, newdef)) {
                    virDomainDefFree(newdef);
                    goto cleanup;
                }

                virDomainDefFree(*def);
                *def = newdef;
                /* We should taint the domain here. However, @vm and therefore
                 * privateData too are still NULL, so just notice the fact and
                 * taint it later. */
                taint_hook = true;
            }
        }
    }

    /* Parse cookie earlier than adding the domain onto the
     * domain list. Parsing/validation may fail and there's no
     * point in having the domain in the list at that point. */
    if (!(mig = qemuMigrationEatCookie(driver, *def, origname, NULL,
                                       cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_LOCKSTATE |
                                       QEMU_MIGRATION_COOKIE_NBD |
                                       QEMU_MIGRATION_COOKIE_MEMORY_HOTPLUG |
                                       QEMU_MIGRATION_COOKIE_CPU_HOTPLUG |
                                       QEMU_MIGRATION_COOKIE_CPU |
                                       QEMU_MIGRATION_COOKIE_ALLOW_REBOOT |
                                       QEMU_MIGRATION_COOKIE_CAPS)))
        goto cleanup;

    if (!(vm = virDomainObjListAdd(driver->domains, *def,
                                   driver->xmlopt,
                                   VIR_DOMAIN_OBJ_LIST_ADD_LIVE |
                                   VIR_DOMAIN_OBJ_LIST_ADD_CHECK_LIVE,
                                   NULL)))
        goto cleanup;
    *def = NULL;

    priv = vm->privateData;
    if (VIR_STRDUP(priv->origname, origname) < 0)
        goto cleanup;

    if (taint_hook) {
        /* Domain XML has been altered by a hook script. */
        priv->hookRun = true;
    }

    if (STREQ_NULLABLE(protocol, "rdma") &&
        !virMemoryLimitIsSet(vm->def->mem.hard_limit)) {
        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                       _("cannot start RDMA migration with no memory hard "
                         "limit set"));
        goto cleanup;
    }

    if (qemuMigrationDstPrecreateStorage(vm, mig->nbd,
                                         nmigrate_disks, migrate_disks,
                                         !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
        goto cleanup;

    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                              flags) < 0)
        goto cleanup;
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PREPARE);

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    if (flags & VIR_MIGRATE_OFFLINE)
        goto done;

    if (tunnel &&
        (pipe(dataFD) < 0 || virSetCloseExec(dataFD[1]) < 0)) {
        virReportSystemError(errno, "%s",
                             _("cannot create pipe for tunnelled migration"));
        goto stopjob;
    }

    startFlags = VIR_QEMU_PROCESS_START_AUTODESTROY;

    if (qemuProcessInit(driver, vm, mig->cpu, QEMU_ASYNC_JOB_MIGRATION_IN,
                        true, startFlags) < 0)
        goto stopjob;
    stopProcess = true;

    priv->allowReboot = mig->allowReboot;

    if (!(incoming = qemuMigrationDstPrepare(vm, tunnel, protocol,
                                             listenAddress, port,
                                             dataFD[0])))
        goto stopjob;

    if (qemuProcessPrepareDomain(driver, vm, startFlags) < 0)
        goto stopjob;

    if (qemuProcessPrepareHost(driver, vm, startFlags) < 0)
        goto stopjob;

    rv = qemuProcessLaunch(dconn, driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                           incoming, NULL,
                           VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
                           startFlags);
    if (rv < 0) {
        if (rv == -2)
            relabel = true;
        goto stopjob;
    }
    relabel = true;

    if (tunnel) {
        if (virFDStreamOpen(st, dataFD[1]) < 0) {
            virReportSystemError(errno, "%s",
                                 _("cannot pass pipe for tunnelled migration"));
            goto stopjob;
        }
        dataFD[1] = -1; /* 'st' owns the FD now & will close it */
    }

    if (STREQ_NULLABLE(protocol, "rdma") &&
        virProcessSetMaxMemLock(vm->pid, vm->def->mem.hard_limit << 10) < 0) {
        goto stopjob;
    }

    if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                                 migParams, mig->caps->automatic) < 0)
        goto stopjob;

    /* Migrations using TLS need to add the "tls-creds-x509" object and
     * set the migration TLS parameters */
    if (flags & VIR_MIGRATE_TLS) {
        if (qemuMigrationParamsEnableTLS(driver, vm, true,
                                         QEMU_ASYNC_JOB_MIGRATION_IN,
                                         &tlsAlias, NULL,
                                         migParams) < 0)
            goto stopjob;
    } else {
        if (qemuMigrationParamsDisableTLS(vm, migParams) < 0)
            goto stopjob;
    }

    if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                                 migParams) < 0)
        goto stopjob;

    if (mig->nbd &&
        flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC) &&
        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_SERVER)) {
        const char *nbdTLSAlias = NULL;

        if (flags & VIR_MIGRATE_TLS) {
            if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_TLS)) {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("QEMU NBD server does not support TLS transport"));
                goto stopjob;
            }

            nbdTLSAlias = tlsAlias;
        }

        if (qemuMigrationDstStartNBDServer(driver, vm, incoming->address,
                                           nmigrate_disks, migrate_disks,
                                           nbdPort, nbdTLSAlias) < 0) {
            goto stopjob;
        }
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (mig->lockState) {
        VIR_DEBUG("Received lockstate %s", mig->lockState);
        VIR_FREE(priv->lockState);
        priv->lockState = mig->lockState;
        mig->lockState = NULL;
    } else {
        VIR_DEBUG("Received no lockstate");
    }

    if (incoming->deferredURI &&
        qemuMigrationDstRun(driver, vm, incoming->deferredURI,
                            QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        goto stopjob;

    if (qemuProcessFinishStartup(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                                 false, VIR_DOMAIN_PAUSED_MIGRATION) < 0)
        goto stopjob;

 done:
    if (qemuMigrationBakeCookie(mig, driver, vm,
                                QEMU_MIGRATION_DESTINATION,
                                cookieout, cookieoutlen, cookieFlags) < 0) {
        /* We could tear down the whole guest here, but
         * cookie data is (so far) non-critical, so that
         * seems a little harsh. We'll just warn for now.
         */
        VIR_WARN("Unable to encode migration cookie");
    }

    if (qemuDomainCleanupAdd(vm, qemuMigrationDstPrepareCleanup) < 0)
        goto stopjob;

    if (!(flags & VIR_MIGRATE_OFFLINE)) {
        virDomainAuditStart(vm, "migrated", true);
        event = virDomainEventLifecycleNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STARTED,
                                         VIR_DOMAIN_EVENT_STARTED_MIGRATED);
    }

    /* We keep the job active across API calls until the finish() call.
     * This prevents any other APIs being invoked while incoming
     * migration is taking place.
     */
    qemuMigrationJobContinue(vm);

    if (autoPort)
        priv->migrationPort = port;
    /* in this case port is not auto selected and we don't need to manage it
     * anymore after cookie is baked
     */
    if (nbdPort != 0)
        priv->nbdPort = 0;
    ret = 0;

 cleanup:
    virErrorPreserveLast(&origErr);
    VIR_FREE(tlsAlias);
    qemuProcessIncomingDefFree(incoming);
    VIR_FREE(xmlout);
    VIR_FORCE_CLOSE(dataFD[0]);
    VIR_FORCE_CLOSE(dataFD[1]);
    if (ret < 0 && priv) {
        /* priv is set right after vm is added to the list of domains
         * and there is no 'goto cleanup;' in the middle of those */
        VIR_FREE(priv->origname);
        /* release if port is auto selected which is not the case if
         * it is given in parameters
         */
        if (nbdPort == 0)
            virPortAllocatorRelease(priv->nbdPort);
        priv->nbdPort = 0;
        virDomainObjRemoveTransientDef(vm);
        qemuDomainRemoveInactiveJob(driver, vm);
    }
    virDomainObjEndAPI(&vm);
    virObjectEventStateQueue(driver->domainEventState, event);
    qemuMigrationCookieFree(mig);
    virObjectUnref(caps);
    virNWFilterUnlockFilterUpdates();
    virErrorRestore(&origErr);
    return ret;

 stopjob:
    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                             priv->job.migParams, priv->job.apiFlags);

    if (stopProcess) {
        unsigned int stopFlags = VIR_QEMU_PROCESS_STOP_MIGRATED;
        if (!relabel)
            stopFlags |= VIR_QEMU_PROCESS_STOP_NO_RELABEL;
        virDomainAuditStart(vm, "migrated", false);
        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                        QEMU_ASYNC_JOB_MIGRATION_IN, stopFlags);
    }

    qemuMigrationJobFinish(driver, vm);
    goto cleanup;
}


/*
 * This version starts an empty VM listening on a localhost TCP port, and
 * sets up the corresponding virStream to handle the incoming data.
 */
int
qemuMigrationDstPrepareTunnel(virQEMUDriverPtr driver,
                              virConnectPtr dconn,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              virStreamPtr st,
                              virDomainDefPtr *def,
                              const char *origname,
                              qemuMigrationParamsPtr migParams,
                              unsigned long flags)
{
    VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, st=%p, def=%p, "
              "origname=%s, flags=0x%lx",
              driver, dconn, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, st, *def, origname, flags);

    if (st == NULL) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("tunnelled migration requested but NULL stream passed"));
        return -1;
    }

    return qemuMigrationDstPrepareAny(driver, dconn, cookiein, cookieinlen,
                                      cookieout, cookieoutlen, def, origname,
                                      st, NULL, 0, false, NULL, 0, NULL, 0,
                                      migParams, flags);
}


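/* Parse a migration URI, accepting the legacy "tcp:host" form as well as
 * well-formed "tcp://host" URIs; *wellFormed reports which form was
 * passed in.
 */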
static virURIPtr
qemuMigrationAnyParseURI(const char *uri, bool *wellFormed)
{
    char *tmp = NULL;
    virURIPtr parsed;

    /* For compatibility reasons tcp://... URIs are sent as tcp:...
     * We need to transform them to a well-formed URI before parsing. */
    if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri + 4, "//")) {
        if (virAsprintf(&tmp, "tcp://%s", uri + 4) < 0)
            return NULL;
        uri = tmp;
    }

    parsed = virURIParse(uri);
    if (parsed && wellFormed)
        *wellFormed = !tmp;
    VIR_FREE(tmp);

    return parsed;
}


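/* Prepare phase for direct (non-tunnelled) migration on the destination.
 * If no URI was supplied, a port is allocated and a tcp URI pointing at
 * this host (or the configured migrateHost) is returned in @uri_out;
 * otherwise the supplied URI is validated and completed with a port
 * before the common Prepare code runs.
 */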
int
qemuMigrationDstPrepareDirect(virQEMUDriverPtr driver,
                              virConnectPtr dconn,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              const char *uri_in,
                              char **uri_out,
                              virDomainDefPtr *def,
                              const char *origname,
                              const char *listenAddress,
                              size_t nmigrate_disks,
                              const char **migrate_disks,
                              int nbdPort,
                              qemuMigrationParamsPtr migParams,
                              unsigned long flags)
{
    unsigned short port = 0;
    bool autoPort = true;
    char *hostname = NULL;
    int ret = -1;
    virURIPtr uri = NULL;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    const char *migrateHost = cfg->migrateHost;

    VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, uri_in=%s, uri_out=%p, "
              "def=%p, origname=%s, listenAddress=%s, "
              "nmigrate_disks=%zu, migrate_disks=%p, nbdPort=%d, flags=0x%lx",
              driver, dconn, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, NULLSTR(uri_in), uri_out,
              *def, origname, NULLSTR(listenAddress),
              nmigrate_disks, migrate_disks, nbdPort, flags);

    *uri_out = NULL;

    /* The URI passed in may be NULL or a string "tcp://somehostname:port".
     *
     * If the URI passed in is NULL then we allocate a port number
     * from our pool of port numbers, and if the migrateHost is configured,
     * we return a URI of "tcp://migrateHost:port", otherwise return a URI
     * of "tcp://ourhostname:port".
     *
     * If the URI passed in is not NULL then we try to parse out the
     * port number and use that (note that the hostname is assumed
     * to be a correct hostname which refers to the target machine).
     */
    if (uri_in == NULL) {
        bool encloseAddress = false;
        const char *incFormat;

        if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
            goto cleanup;

        if (migrateHost != NULL) {
            if (virSocketAddrNumericFamily(migrateHost) == AF_INET6)
                encloseAddress = true;

            if (VIR_STRDUP(hostname, migrateHost) < 0)
                goto cleanup;
        } else {
            if ((hostname = virGetHostname()) == NULL)
                goto cleanup;
        }

        if (STRPREFIX(hostname, "localhost")) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("hostname on destination resolved to localhost,"
                             " but migration requires an FQDN"));
            goto cleanup;
        }

        /* XXX this really should have been a properly well-formed
         * URI, but we can't add in tcp:// now without breaking
         * compatibility with old targets. We at least make the
         * new targets accept both syntaxes though.
         */
        if (encloseAddress)
            incFormat = "%s:[%s]:%d";
        else
            incFormat = "%s:%s:%d";

        if (virAsprintf(uri_out, incFormat, "tcp", hostname, port) < 0)
            goto cleanup;
    } else {
        bool well_formed_uri;

        if (!(uri = qemuMigrationAnyParseURI(uri_in, &well_formed_uri)))
            goto cleanup;

        if (uri->scheme == NULL) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("missing scheme in migration URI: %s"),
                           uri_in);
            goto cleanup;
        }

        if (STRNEQ(uri->scheme, "tcp") &&
            STRNEQ(uri->scheme, "rdma")) {
            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED,
                           _("unsupported scheme %s in migration URI %s"),
                           uri->scheme, uri_in);
            goto cleanup;
        }

        if (uri->server == NULL) {
            virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration"
                                                  " URI: %s"), uri_in);
            goto cleanup;
        }

        if (uri->port == 0) {
            if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
                goto cleanup;

            /* Send well-formed URI only if uri_in was well-formed */
            if (well_formed_uri) {
                uri->port = port;
                if (!(*uri_out = virURIFormat(uri)))
                    goto cleanup;
            } else {
                if (virAsprintf(uri_out, "%s:%d", uri_in, port) < 0)
                    goto cleanup;
            }
        } else {
            port = uri->port;
            autoPort = false;
        }
    }

    if (*uri_out)
        VIR_DEBUG("Generated uri_out=%s", *uri_out);

    ret = qemuMigrationDstPrepareAny(driver, dconn, cookiein, cookieinlen,
                                     cookieout, cookieoutlen, def, origname,
                                     NULL, uri ? uri->scheme : "tcp",
                                     port, autoPort, listenAddress,
                                     nmigrate_disks, migrate_disks, nbdPort,
                                     migParams, flags);
 cleanup:
    virURIFree(uri);
    VIR_FREE(hostname);
    virObjectUnref(cfg);
    if (ret != 0) {
        VIR_FREE(*uri_out);
        if (autoPort)
            virPortAllocatorRelease(port);
    }
    return ret;
}


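/* Parse the domain XML sent by the source into a virDomainDef.  If @dname
 * requests a rename, the definition is renamed and the original name is
 * returned via @origname.
 */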
virDomainDefPtr
qemuMigrationAnyPrepareDef(virQEMUDriverPtr driver,
                           const char *dom_xml,
                           const char *dname,
                           char **origname)
{
    virCapsPtr caps = NULL;
    virDomainDefPtr def;
    char *name = NULL;

    if (!dom_xml) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("no domain XML passed"));
        return NULL;
    }

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        return NULL;

    if (!(def = virDomainDefParseString(dom_xml, caps, driver->xmlopt, NULL,
                                        VIR_DOMAIN_DEF_PARSE_INACTIVE |
                                        VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE)))
        goto cleanup;

    if (dname) {
        name = def->name;
        if (VIR_STRDUP(def->name, dname) < 0) {
            virDomainDefFree(def);
            def = NULL;
        }
    }

 cleanup:
    virObjectUnref(caps);
    if (def && origname)
        *origname = name;
    else
        VIR_FREE(name);
    return def;
}


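/* Confirm phase on the source host: with retcode == 0 the domain was
 * successfully started on the destination, so the local copy is shut
 * down; otherwise outstanding NBD copy jobs are cancelled and the domain
 * is resumed, or marked as failed post-copy if it was already paused in
 * post-copy mode.
 */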
static int
qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver,
                             virDomainObjPtr vm,
                             const char *cookiein,
                             int cookieinlen,
                             unsigned int flags,
                             int retcode)
{
    qemuMigrationCookiePtr mig;
    virObjectEventPtr event;
    int rv = -1;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = NULL;

    VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "flags=0x%x, retcode=%d",
              driver, vm, NULLSTR(cookiein), cookieinlen,
              flags, retcode);

    virCheckFlags(QEMU_MIGRATION_FLAGS, -1);

    qemuMigrationJobSetPhase(driver, vm,
                             retcode == 0
                             ? QEMU_MIGRATION_PHASE_CONFIRM3
                             : QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED);

    if (!(mig = qemuMigrationEatCookie(driver, vm->def, priv->origname, priv,
                                       cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_STATS)))
        goto cleanup;

    if (retcode == 0)
        jobInfo = priv->job.completed;
    else
        VIR_FREE(priv->job.completed);

    /* Update times with the values sent by the destination daemon */
    if (mig->jobInfo && jobInfo) {
        int reason;

        /* We need to refresh migration statistics after a completed post-copy
         * migration since priv->job.completed contains obsolete data from the
         * time we switched to post-copy mode.
         */
        if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
            reason == VIR_DOMAIN_PAUSED_POSTCOPY &&
            qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                       jobInfo, NULL) < 0)
            VIR_WARN("Could not refresh migration statistics");

        qemuDomainJobInfoUpdateTime(jobInfo);
        jobInfo->timeDeltaSet = mig->jobInfo->timeDeltaSet;
        jobInfo->timeDelta = mig->jobInfo->timeDelta;
        jobInfo->stats.mig.downtime_set = mig->jobInfo->stats.mig.downtime_set;
        jobInfo->stats.mig.downtime = mig->jobInfo->stats.mig.downtime;
    }

    if (flags & VIR_MIGRATE_OFFLINE)
        goto done;

    /* Did the migration go as planned?  If yes, kill off the domain object.
     * If something failed, resume CPUs, but only if we didn't use post-copy.
     */
    if (retcode == 0) {
        /* If guest uses SPICE and supports seamless migration we have to hold
         * up domain shutdown until SPICE server transfers its data */
2971
        qemuMigrationSrcWaitForSpice(vm);
2972 2973

        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
2974
                        QEMU_ASYNC_JOB_MIGRATION_OUT,
2975 2976 2977
                        VIR_QEMU_PROCESS_STOP_MIGRATED);
        virDomainAuditStop(vm, "migrated");

2978
        event = virDomainEventLifecycleNewFromObj(vm,
2979 2980
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
        virObjectEventStateQueue(driver->domainEventState, event);
        qemuDomainEventEmitJobCompleted(driver, vm);
    } else {
        virErrorPtr orig_err = virSaveLastError();
        int reason;

        /* cancel any outstanding NBD jobs */
        qemuMigrationSrcNBDCopyCancel(driver, vm, false,
                                      QEMU_ASYNC_JOB_MIGRATION_OUT, NULL);

        virSetError(orig_err);
        virFreeError(orig_err);

        if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
            reason == VIR_DOMAIN_PAUSED_POSTCOPY)
            qemuMigrationAnyPostcopyFailed(driver, vm);
        else
            qemuMigrationSrcRestoreDomainState(driver, vm);

        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 priv->job.migParams, priv->job.apiFlags);

        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
            VIR_WARN("Failed to save status on vm %s", vm->def->name);
    }

 done:
    qemuMigrationCookieFree(mig);
    rv = 0;

 cleanup:
    virObjectUnref(cfg);
    return rv;
}

int
qemuMigrationSrcConfirm(virQEMUDriverPtr driver,
                        virDomainObjPtr vm,
                        const char *cookiein,
                        int cookieinlen,
                        unsigned int flags,
                        int cancelled)
{
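    /* Public entry point for the Confirm step: make sure a migration job is
     * still active, run qemuMigrationSrcConfirmPhase() and finish the job,
     * cleaning up the domain object if the guest no longer runs here. */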
    qemuMigrationJobPhase phase;
    virQEMUDriverConfigPtr cfg = NULL;
    int ret = -1;

    cfg = virQEMUDriverGetConfig(driver);

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
        goto cleanup;

    if (cancelled)
        phase = QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED;
    else
        phase = QEMU_MIGRATION_PHASE_CONFIRM3;

    qemuMigrationJobStartPhase(driver, vm, phase);
    virCloseCallbacksUnset(driver->closeCallbacks, vm,
                           qemuMigrationSrcCleanup);

    ret = qemuMigrationSrcConfirmPhase(driver, vm,
                                       cookiein, cookieinlen,
                                       flags, cancelled);

    qemuMigrationJobFinish(driver, vm);
    if (!virDomainObjIsActive(vm)) {
        if (!cancelled && ret == 0 && flags & VIR_MIGRATE_UNDEFINE_SOURCE) {
            virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
            vm->persistent = 0;
        }
        qemuDomainRemoveInactiveJob(driver, vm);
    }

 cleanup:
    virDomainObjEndAPI(&vm);
    virObjectUnref(cfg);
    return ret;
}


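/* How the migration stream reaches the destination: QEMU connects to a
 * host:port itself (HOST), libvirt opens the TCP connection and hands the
 * connected socket to QEMU (CONNECT_HOST), or QEMU writes into an already
 * open file descriptor (FD). */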
enum qemuMigrationDestinationType {
    MIGRATION_DEST_HOST,
    MIGRATION_DEST_CONNECT_HOST,
    MIGRATION_DEST_FD,
};

enum qemuMigrationForwardType {
    MIGRATION_FWD_DIRECT,
    MIGRATION_FWD_STREAM,
};

typedef struct _qemuMigrationSpec qemuMigrationSpec;
typedef qemuMigrationSpec *qemuMigrationSpecPtr;
struct _qemuMigrationSpec {
    enum qemuMigrationDestinationType destType;
    union {
        struct {
            const char *protocol;
            const char *name;
            int port;
        } host;

        struct {
            int qemu;
            int local;
        } fd;
    } dest;

    enum qemuMigrationForwardType fwdType;
    union {
        virStreamPtr stream;
    } fwd;
};

#define TUNNEL_SEND_BUF_SIZE 65536

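/* State of the helper thread which proxies tunnelled migration data from
 * the local QEMU migration socket into the libvirt stream; the wakeup pipe
 * is used by qemuMigrationSrcStopTunnel() to ask it to finish or abort. */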
typedef struct _qemuMigrationIOThread qemuMigrationIOThread;
typedef qemuMigrationIOThread *qemuMigrationIOThreadPtr;
struct _qemuMigrationIOThread {
    virThread thread;
    virStreamPtr st;
    int sock;
    virError err;
    int wakeupRecvFD;
    int wakeupSendFD;
};

static void qemuMigrationSrcIOFunc(void *arg)
{
    qemuMigrationIOThreadPtr data = arg;
    char *buffer = NULL;
    struct pollfd fds[2];
    int timeout = -1;
    virErrorPtr err = NULL;

    VIR_DEBUG("Running migration tunnel; stream=%p, sock=%d",
              data->st, data->sock);

    if (VIR_ALLOC_N(buffer, TUNNEL_SEND_BUF_SIZE) < 0)
        goto abrt;

    fds[0].fd = data->sock;
    fds[1].fd = data->wakeupRecvFD;

    for (;;) {
        int ret;

        fds[0].events = fds[1].events = POLLIN;
        fds[0].revents = fds[1].revents = 0;

        ret = poll(fds, ARRAY_CARDINALITY(fds), timeout);

        if (ret < 0) {
            if (errno == EAGAIN || errno == EINTR)
                continue;
            virReportSystemError(errno, "%s",
                                 _("poll failed in migration tunnel"));
            goto abrt;
        }

        if (ret == 0) {
            /* We were asked to gracefully stop but reading would block. This
             * can only happen if qemu told us migration finished but didn't
             * close the migration fd. We handle this in the same way as EOF.
             */
            VIR_DEBUG("QEMU forgot to close migration fd");
            break;
        }

        if (fds[1].revents & (POLLIN | POLLERR | POLLHUP)) {
            char stop = 0;

            if (saferead(data->wakeupRecvFD, &stop, 1) != 1) {
                virReportSystemError(errno, "%s",
                                     _("failed to read from wakeup fd"));
                goto abrt;
            }

            VIR_DEBUG("Migration tunnel was asked to %s",
                      stop ? "abort" : "finish");
            if (stop) {
                goto abrt;
            } else {
                timeout = 0;
            }
        }

        if (fds[0].revents & (POLLIN | POLLERR | POLLHUP)) {
            int nbytes;

            nbytes = saferead(data->sock, buffer, TUNNEL_SEND_BUF_SIZE);
            if (nbytes > 0) {
                if (virStreamSend(data->st, buffer, nbytes) < 0)
                    goto error;
            } else if (nbytes < 0) {
                virReportSystemError(errno, "%s",
                        _("tunnelled migration failed to read from qemu"));
                goto abrt;
            } else {
                /* EOF; get out of here */
                break;
            }
        }
    }

    if (virStreamFinish(data->st) < 0)
        goto error;

    VIR_FORCE_CLOSE(data->sock);
    VIR_FREE(buffer);

    return;

 abrt:
    err = virSaveLastError();
    if (err && err->code == VIR_ERR_OK) {
        virFreeError(err);
        err = NULL;
    }
    virStreamAbort(data->st);
    if (err) {
        virSetError(err);
        virFreeError(err);
    }

 error:
    /* Let the source qemu know that the transfer can't continue anymore.
     * Don't copy the error for EPIPE as destination has the actual error. */
    VIR_FORCE_CLOSE(data->sock);
    if (!virLastErrorIsSystemErrno(EPIPE))
        virCopyLastError(&data->err);
    virResetLastError();
    VIR_FREE(buffer);
}


static qemuMigrationIOThreadPtr
qemuMigrationSrcStartTunnel(virStreamPtr st,
                            int sock)
{
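    /* Create the wakeup pipe and spawn qemuMigrationSrcIOFunc() to shuttle
     * data from @sock into @st until EOF or until it is told to stop. */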
    qemuMigrationIOThreadPtr io = NULL;
    int wakeupFD[2] = { -1, -1 };

    if (pipe2(wakeupFD, O_CLOEXEC) < 0) {
        virReportSystemError(errno, "%s",
                             _("Unable to make pipe"));
        goto error;
    }

    if (VIR_ALLOC(io) < 0)
        goto error;

    io->st = st;
    io->sock = sock;
    io->wakeupRecvFD = wakeupFD[0];
    io->wakeupSendFD = wakeupFD[1];

    if (virThreadCreate(&io->thread, true,
                        qemuMigrationSrcIOFunc,
                        io) < 0) {
        virReportSystemError(errno, "%s",
                             _("Unable to create migration thread"));
        goto error;
    }

    return io;

 error:
    VIR_FORCE_CLOSE(wakeupFD[0]);
    VIR_FORCE_CLOSE(wakeupFD[1]);
    VIR_FREE(io);
    return NULL;
}

static int
qemuMigrationSrcStopTunnel(qemuMigrationIOThreadPtr io, bool error)
{
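    /* Wake the IO thread up so it finishes (or aborts, when @error is set),
     * join it and propagate any error the thread recorded. */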
    int rv = -1;
    char stop = error ? 1 : 0;

    /* make sure the thread finishes its job and is joinable */
    if (safewrite(io->wakeupSendFD, &stop, 1) != 1) {
        virReportSystemError(errno, "%s",
                             _("failed to wakeup migration tunnel"));
        goto cleanup;
    }

    virThreadJoin(&io->thread);

    /* Forward error from the IO thread, to this thread */
    if (io->err.code != VIR_ERR_OK) {
        if (error)
            rv = 0;
        else
            virSetError(&io->err);
        virResetError(&io->err);
        goto cleanup;
    }

    rv = 0;

 cleanup:
    VIR_FORCE_CLOSE(io->wakeupSendFD);
    VIR_FORCE_CLOSE(io->wakeupRecvFD);
    VIR_FREE(io);
    return rv;
}

static int
qemuMigrationSrcConnect(virQEMUDriverPtr driver,
                        virDomainObjPtr vm,
                        qemuMigrationSpecPtr spec)
{
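    /* Open the TCP connection to the destination on behalf of QEMU and turn
     * the MIGRATION_DEST_CONNECT_HOST spec into MIGRATION_DEST_FD carrying
     * the connected, blocking socket. */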
    virNetSocketPtr sock;
    const char *host;
    char *port = NULL;
    int ret = -1;

    host = spec->dest.host.name;
    if (virAsprintf(&port, "%d", spec->dest.host.port) < 0)
        return -1;

    spec->destType = MIGRATION_DEST_FD;
    spec->dest.fd.qemu = -1;

    if (qemuSecuritySetSocketLabel(driver->securityManager, vm->def) < 0)
        goto cleanup;
    if (virNetSocketNewConnectTCP(host, port,
                                  AF_UNSPEC,
                                  &sock) == 0) {
        spec->dest.fd.qemu = virNetSocketDupFD(sock, true);
        virObjectUnref(sock);
    }
    if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0 ||
        spec->dest.fd.qemu == -1)
        goto cleanup;

    /* Migration expects a blocking FD */
    if (virSetBlocking(spec->dest.fd.qemu, true) < 0) {
        virReportSystemError(errno, _("Unable to set FD %d blocking"),
                             spec->dest.fd.qemu);
        goto cleanup;
    }

    ret = 0;

 cleanup:
    VIR_FREE(port);
    if (ret < 0)
        VIR_FORCE_CLOSE(spec->dest.fd.qemu);
    return ret;
}


static int
qemuMigrationSrcContinue(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         qemuMonitorMigrationStatus status,
                         qemuDomainAsyncJob asyncJob)
{
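    /* Let a migration that is waiting in @status (e.g. pre-switchover)
     * proceed, using qemuMonitorMigrateContinue(). */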
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    ret = qemuMonitorMigrateContinue(priv->mon, status);

    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        ret = -1;

    return ret;
}


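/* qemuMigrationSrcRun:
 *
 * Perform-phase worker shared by native and tunnelled migration: applies
 * migration parameters, optionally starts the NBD storage copy, issues the
 * migrate command for the given destination spec and waits for completion,
 * cancelling the migration on failure.
 */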
static int
qemuMigrationSrcRun(virQEMUDriverPtr driver,
                    virDomainObjPtr vm,
                    const char *persist_xml,
                    const char *cookiein,
                    int cookieinlen,
                    char **cookieout,
                    int *cookieoutlen,
                    unsigned long flags,
                    unsigned long resource,
                    qemuMigrationSpecPtr spec,
                    virConnectPtr dconn,
                    const char *graphicsuri,
                    size_t nmigrate_disks,
                    const char **migrate_disks,
                    qemuMigrationParamsPtr migParams)
{
    int ret = -1;
    unsigned int migrate_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuMigrationCookiePtr mig = NULL;
    char *tlsAlias = NULL;
    qemuMigrationIOThreadPtr iothread = NULL;
    int fd = -1;
    unsigned long migrate_speed = resource ? resource : priv->migMaxBandwidth;
    virErrorPtr orig_err = NULL;
    unsigned int cookieFlags = 0;
    bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);
    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
    bool cancel = false;
    unsigned int waitFlags;
    virDomainDefPtr persistDef = NULL;
    char *timestamp;
    int rc;

    VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, resource=%lu, "
              "spec=%p (dest=%d, fwd=%d), dconn=%p, graphicsuri=%s, "
              "nmigrate_disks=%zu, migrate_disks=%p",
              driver, vm, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, resource,
              spec, spec->destType, spec->fwdType, dconn,
              NULLSTR(graphicsuri), nmigrate_disks, migrate_disks);

    if (flags & VIR_MIGRATE_NON_SHARED_DISK) {
        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (flags & VIR_MIGRATE_NON_SHARED_INC) {
        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (virLockManagerPluginUsesState(driver->lockManager) &&
        !cookieout) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Migration with lock driver %s requires"
                         " cookie support"),
                       virLockManagerPluginGetName(driver->lockManager));
        return -1;
    }

    if (events)
        priv->signalIOError = abort_on_error;

    if (flags & VIR_MIGRATE_PERSIST_DEST) {
        if (persist_xml) {
            if (!(persistDef = qemuMigrationAnyPrepareDef(driver, persist_xml,
                                                          NULL, NULL)))
                goto error;
        } else {
            virDomainDefPtr def = vm->newDef ? vm->newDef : vm->def;
            if (!(persistDef = qemuDomainDefCopy(driver, def,
                                                 VIR_DOMAIN_XML_SECURE |
                                                 VIR_DOMAIN_XML_MIGRATABLE)))
                goto error;
        }
    }

    mig = qemuMigrationEatCookie(driver, vm->def, priv->origname, priv,
                                 cookiein, cookieinlen,
                                 cookieFlags |
                                 QEMU_MIGRATION_COOKIE_GRAPHICS |
                                 QEMU_MIGRATION_COOKIE_CAPS);
    if (!mig)
        goto error;

    if (qemuMigrationSrcGraphicsRelocate(driver, vm, mig, graphicsuri) < 0)
        VIR_WARN("unable to provide data for graphics client relocation");

    if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 migParams, mig->caps->automatic) < 0)
        goto error;

    if (flags & VIR_MIGRATE_TLS) {
        const char *hostname = NULL;

        /* We need to add tls-hostname whenever QEMU itself does not
         * connect directly to the destination. */
        if (spec->destType == MIGRATION_DEST_CONNECT_HOST ||
            spec->destType == MIGRATION_DEST_FD)
            hostname = spec->dest.host.name;

        if (qemuMigrationParamsEnableTLS(driver, vm, false,
                                         QEMU_ASYNC_JOB_MIGRATION_OUT,
                                         &tlsAlias, hostname,
                                         migParams) < 0)
            goto error;
    } else {
        if (qemuMigrationParamsDisableTLS(vm, migParams) < 0)
            goto error;
    }

    if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 migParams) < 0)
        goto error;

    if (migrate_flags & (QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
                         QEMU_MONITOR_MIGRATE_NON_SHARED_INC)) {
        if (mig->nbd) {
            /* Currently libvirt does not support setting up of the NBD
             * non-shared storage migration with TLS. As we need to honour the
             * VIR_MIGRATE_TLS flag, we need to reject such migration until
             * we implement TLS for NBD. */
            if (flags & VIR_MIGRATE_TLS &&
                !virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_DEL)) {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("NBD migration with TLS is not supported"));
                goto error;
            }

            /* This will update migrate_flags on success */
            if (qemuMigrationSrcNBDStorageCopy(driver, vm, mig,
                                               spec->dest.host.name,
                                               migrate_speed,
                                               &migrate_flags,
                                               nmigrate_disks,
                                               migrate_disks,
                                               dconn, tlsAlias, flags) < 0) {
                goto error;
            }
        } else {
            /* Destination doesn't support NBD server.
             * Fall back to previous implementation. */
            VIR_DEBUG("Destination doesn't support NBD server. "
                      "Falling back to previous implementation.");
        }
    }

    /* Before EnterMonitor, since qemuMigrationSetOffline already does that */
    if (!(flags & VIR_MIGRATE_LIVE) &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        if (qemuMigrationSrcSetOffline(driver, vm) < 0)
            goto error;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
        goto error;

    if (priv->job.abortJob) {
        /* explicitly do this *after* we entered the monitor,
         * as this is a critical section so we are guaranteed
         * priv->job.abortJob will not change */
        priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
        virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                       qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                       _("canceled by client"));
        goto exit_monitor;
    }

    if (qemuMonitorSetMigrationSpeed(priv->mon, migrate_speed) < 0)
        goto exit_monitor;

    /* connect to the destination qemu if needed */
    if (spec->destType == MIGRATION_DEST_CONNECT_HOST &&
        qemuMigrationSrcConnect(driver, vm, spec) < 0) {
        goto exit_monitor;
    }

    /* log start of migration */
    if ((timestamp = virTimeStringNow()) != NULL) {
        qemuDomainLogAppendMessage(driver, vm, "%s: initiating migration\n", timestamp);
        VIR_FREE(timestamp);
    }

    rc = -1;
    switch (spec->destType) {
    case MIGRATION_DEST_HOST:
        if (STREQ(spec->dest.host.protocol, "rdma") &&
            virProcessSetMaxMemLock(vm->pid, vm->def->mem.hard_limit << 10) < 0) {
            goto exit_monitor;
        }
        rc = qemuMonitorMigrateToHost(priv->mon, migrate_flags,
                                      spec->dest.host.protocol,
                                      spec->dest.host.name,
                                      spec->dest.host.port);
        break;

    case MIGRATION_DEST_CONNECT_HOST:
        /* handled above and transformed into MIGRATION_DEST_FD */
        break;

    case MIGRATION_DEST_FD:
        if (spec->fwdType != MIGRATION_FWD_DIRECT) {
            fd = spec->dest.fd.local;
            spec->dest.fd.local = -1;
        }
        rc = qemuMonitorMigrateToFd(priv->mon, migrate_flags,
                                    spec->dest.fd.qemu);
        VIR_FORCE_CLOSE(spec->dest.fd.qemu);
        break;
    }

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0)
        goto error;

    /* From this point onwards we *must* call cancel to abort the
     * migration on source if anything goes wrong */
    cancel = true;

    if (spec->fwdType != MIGRATION_FWD_DIRECT) {
        if (!(iothread = qemuMigrationSrcStartTunnel(spec->fwd.stream, fd)))
            goto error;
        /* If we've created a tunnel, then the 'fd' will be closed in the
         * qemuMigrationSrcIOFunc as data->sock.
         */
        fd = -1;
    }

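    /* Stop waiting once QEMU reaches the pre-switchover state so that any
     * storage copy jobs can be finished before the migration completes; the
     * remaining completion criteria depend on the requested flags. */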
    waitFlags = QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;
    if (abort_on_error)
        waitFlags |= QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR;
    if (mig->nbd)
        waitFlags |= QEMU_MIGRATION_COMPLETED_CHECK_STORAGE;
    if (flags & VIR_MIGRATE_POSTCOPY)
        waitFlags |= QEMU_MIGRATION_COMPLETED_POSTCOPY;

    rc = qemuMigrationSrcWaitForCompletion(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_OUT,
                                           dconn, waitFlags);
    if (rc == -2) {
        goto error;
    } else if (rc == -1) {
        /* QEMU reported failed migration, nothing to cancel anymore */
        cancel = false;
        goto error;
    }

    /* When migration completed, QEMU will have paused the CPUs for us.
     * Wait for the STOP event to be processed or explicitly stop CPUs
     * (for old QEMU which does not send events) to release the lock state.
     */
    if (priv->monJSON) {
        while (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
            priv->signalStop = true;
            rc = virDomainObjWait(vm);
            priv->signalStop = false;
            if (rc < 0)
                goto error;
        }
    } else if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
               qemuMigrationSrcSetOffline(driver, vm) < 0) {
        goto error;
    }

    if (mig->nbd &&
        qemuMigrationSrcNBDCopyCancel(driver, vm, true,
                                      QEMU_ASYNC_JOB_MIGRATION_OUT,
                                      dconn) < 0)
        goto error;

    /* When migration was paused before serializing device state we need to
     * resume it now once we finished all block jobs and wait for the real
     * end of the migration.
     */
    if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) {
        if (qemuMigrationSrcContinue(driver, vm,
                                     QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
                                     QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            goto error;

        waitFlags ^= QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;

        rc = qemuMigrationSrcWaitForCompletion(driver, vm,
                                               QEMU_ASYNC_JOB_MIGRATION_OUT,
                                               dconn, waitFlags);
        if (rc == -2) {
            goto error;
        } else if (rc == -1) {
            /* QEMU reported failed migration, nothing to cancel anymore */
            cancel = false;
            goto error;
        }
    }

    if (iothread) {
        qemuMigrationIOThreadPtr io;

        VIR_STEAL_PTR(io, iothread);
        if (qemuMigrationSrcStopTunnel(io, false) < 0)
            goto error;
    }

    if (priv->job.completed) {
        priv->job.completed->stopped = priv->job.current->stopped;
        qemuDomainJobInfoUpdateTime(priv->job.completed);
        qemuDomainJobInfoUpdateDowntime(priv->job.completed);
        ignore_value(virTimeMillisNow(&priv->job.completed->sent));
    }

    cookieFlags |= QEMU_MIGRATION_COOKIE_NETWORK |
                   QEMU_MIGRATION_COOKIE_STATS;

    if (qemuMigrationCookieAddPersistent(mig, &persistDef) < 0 ||
        qemuMigrationBakeCookie(mig, driver, vm,
                                QEMU_MIGRATION_SOURCE,
                                cookieout, cookieoutlen, cookieFlags) < 0) {
        VIR_WARN("Unable to encode migration cookie");
    }

    ret = 0;

 cleanup:
    VIR_FREE(tlsAlias);
    VIR_FORCE_CLOSE(fd);
    virDomainDefFree(persistDef);
    qemuMigrationCookieFree(mig);

    if (events)
        priv->signalIOError = false;

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

    return ret;

 error:
    orig_err = virSaveLastError();

    if (virDomainObjIsActive(vm)) {
        if (cancel &&
            priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED &&
            qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
            qemuMonitorMigrateCancel(priv->mon);
            ignore_value(qemuDomainObjExitMonitor(driver, vm));
        }

        /* cancel any outstanding NBD jobs */
        if (mig && mig->nbd)
            qemuMigrationSrcNBDCopyCancel(driver, vm, false,
                                          QEMU_ASYNC_JOB_MIGRATION_OUT,
                                          dconn);

        if (priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED)
            priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
    }

    if (iothread)
        qemuMigrationSrcStopTunnel(iothread, true);

    goto cleanup;

 exit_monitor:
    ignore_value(qemuDomainObjExitMonitor(driver, vm));
    goto error;
}

/* Perform migration using QEMU's native migrate support,
 * not encrypted obviously
 */
static int
qemuMigrationSrcPerformNative(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              const char *persist_xml,
                              const char *uri,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              unsigned long flags,
                              unsigned long resource,
                              virConnectPtr dconn,
                              const char *graphicsuri,
                              size_t nmigrate_disks,
                              const char **migrate_disks,
                              qemuMigrationParamsPtr migParams)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virURIPtr uribits = NULL;
    int ret = -1;
    qemuMigrationSpec spec;

    VIR_DEBUG("driver=%p, vm=%p, uri=%s, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, resource=%lu, "
              "graphicsuri=%s, nmigrate_disks=%zu migrate_disks=%p",
              driver, vm, uri, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, resource,
              NULLSTR(graphicsuri), nmigrate_disks, migrate_disks);

    if (!(uribits = qemuMigrationAnyParseURI(uri, NULL)))
        return -1;

    if (uribits->scheme == NULL) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("missing scheme in migration URI: %s"),
                       uri);
        goto cleanup;
    }

    if (STREQ(uribits->scheme, "rdma")) {
        if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_RDMA)) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("outgoing RDMA migration is not supported "
                             "with this QEMU binary"));
            goto cleanup;
        }
        if (!virMemoryLimitIsSet(vm->def->mem.hard_limit)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("cannot start RDMA migration with no memory hard "
                             "limit set"));
            goto cleanup;
        }
    }

    if (STRNEQ(uribits->scheme, "rdma"))
        spec.destType = MIGRATION_DEST_CONNECT_HOST;
    else
        spec.destType = MIGRATION_DEST_HOST;
    spec.dest.host.protocol = uribits->scheme;
    spec.dest.host.name = uribits->server;
    spec.dest.host.port = uribits->port;
    spec.fwdType = MIGRATION_FWD_DIRECT;

    ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen, cookieout,
                              cookieoutlen, flags, resource, &spec, dconn,
                              graphicsuri, nmigrate_disks, migrate_disks,
                              migParams);

    if (spec.destType == MIGRATION_DEST_FD)
        VIR_FORCE_CLOSE(spec.dest.fd.qemu);

 cleanup:
    virURIFree(uribits);

    return ret;
}


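/* Perform a tunnelled migration: QEMU writes the migration data into an
 * anonymous pipe and the IO thread started from qemuMigrationSrcRun()
 * forwards it into the libvirt stream @st. */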
static int
qemuMigrationSrcPerformTunnel(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              virStreamPtr st,
                              const char *persist_xml,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              unsigned long flags,
                              unsigned long resource,
                              virConnectPtr dconn,
                              const char *graphicsuri,
                              size_t nmigrate_disks,
                              const char **migrate_disks,
                              qemuMigrationParamsPtr migParams)
{
    int ret = -1;
    qemuMigrationSpec spec;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    int fds[2] = { -1, -1 };

    VIR_DEBUG("driver=%p, vm=%p, st=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, resource=%lu, "
              "graphicsuri=%s, nmigrate_disks=%zu, migrate_disks=%p",
              driver, vm, st, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, resource,
              NULLSTR(graphicsuri), nmigrate_disks, migrate_disks);

    spec.fwdType = MIGRATION_FWD_STREAM;
    spec.fwd.stream = st;


    spec.destType = MIGRATION_DEST_FD;
    spec.dest.fd.qemu = -1;
    spec.dest.fd.local = -1;

    if (pipe2(fds, O_CLOEXEC) == 0) {
        spec.dest.fd.qemu = fds[1];
        spec.dest.fd.local = fds[0];
    }
    if (spec.dest.fd.qemu == -1 ||
        qemuSecuritySetImageFDLabel(driver->securityManager, vm->def,
                                    spec.dest.fd.qemu) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot create pipe for tunnelled migration"));
        goto cleanup;
    }

    ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen,
                              cookieout, cookieoutlen, flags, resource, &spec,
                              dconn, graphicsuri, nmigrate_disks, migrate_disks,
                              migParams);

 cleanup:
    VIR_FORCE_CLOSE(spec.dest.fd.qemu);
    VIR_FORCE_CLOSE(spec.dest.fd.local);

    virObjectUnref(cfg);
    return ret;
}


/* This is essentially a re-impl of virDomainMigrateVersion2
 * from libvirt.c, but running in source libvirtd context,
 * instead of client app context & also adding in tunnel
 * handling */
static int
qemuMigrationSrcPerformPeer2Peer2(virQEMUDriverPtr driver,
                                  virConnectPtr sconn,
                                  virConnectPtr dconn,
                                  virDomainObjPtr vm,
                                  const char *dconnuri,
                                  unsigned long flags,
                                  const char *dname,
                                  unsigned long resource,
                                  qemuMigrationParamsPtr migParams)
{
    virDomainPtr ddomain = NULL;
    char *uri_out = NULL;
    char *cookie = NULL;
    char *dom_xml = NULL;
    int cookielen = 0, ret;
    virErrorPtr orig_err = NULL;
    bool cancelled;
    virStreamPtr st = NULL;
    unsigned long destflags;

    VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, vm=%p, dconnuri=%s, "
              "flags=0x%lx, dname=%s, resource=%lu",
              driver, sconn, dconn, vm, NULLSTR(dconnuri),
              flags, NULLSTR(dname), resource);

    /* In version 2 of the protocol, the prepare step is slightly
     * different.  We fetch the domain XML of the source domain
     * and pass it to Prepare2.
     */
    if (!(dom_xml = qemuDomainFormatXML(driver, vm,
                                        QEMU_DOMAIN_FORMAT_LIVE_FLAGS |
                                        VIR_DOMAIN_XML_MIGRATABLE)))
        return -1;

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
        flags |= VIR_MIGRATE_PAUSED;

    destflags = flags & ~(VIR_MIGRATE_ABORT_ON_ERROR |
                          VIR_MIGRATE_AUTO_CONVERGE);

    VIR_DEBUG("Prepare2 %p", dconn);
    if (flags & VIR_MIGRATE_TUNNELLED) {
        /*
         * Tunnelled Migrate Version 2 does not support cookies
         * due to missing parameters in the prepareTunnel() API.
         */

        if (!(st = virStreamNew(dconn, 0)))
            goto cleanup;

        qemuDomainObjEnterRemote(vm);
        ret = dconn->driver->domainMigratePrepareTunnel
            (dconn, st, destflags, dname, resource, dom_xml);
        if (qemuDomainObjExitRemote(vm, true) < 0)
            goto cleanup;
    } else {
        qemuDomainObjEnterRemote(vm);
        ret = dconn->driver->domainMigratePrepare2
            (dconn, &cookie, &cookielen, NULL, &uri_out,
             destflags, dname, resource, dom_xml);
        if (qemuDomainObjExitRemote(vm, true) < 0)
            goto cleanup;
    }
    VIR_FREE(dom_xml);
    if (ret == -1)
        goto cleanup;

    if (!(flags & VIR_MIGRATE_TUNNELLED) &&
        (uri_out == NULL)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domainMigratePrepare2 did not set uri"));
        cancelled = true;
        orig_err = virSaveLastError();
        goto finish;
    }

    /* Perform the migration.  The driver isn't supposed to return
     * until the migration is complete.
     */
    VIR_DEBUG("Perform %p", sconn);
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
    if (flags & VIR_MIGRATE_TUNNELLED)
        ret = qemuMigrationSrcPerformTunnel(driver, vm, st, NULL,
                                            NULL, 0, NULL, NULL,
                                            flags, resource, dconn,
                                            NULL, 0, NULL, migParams);
    else
        ret = qemuMigrationSrcPerformNative(driver, vm, NULL, uri_out,
                                            cookie, cookielen,
                                            NULL, NULL, /* No out cookie with v2 migration */
                                            flags, resource, dconn, NULL, 0, NULL,
                                            migParams);

    /* Perform failed. Make sure Finish doesn't overwrite the error */
    if (ret < 0)
        orig_err = virSaveLastError();

    /* If Perform returns < 0, then we need to cancel the VM
     * startup on the destination
     */
    cancelled = ret < 0;

 finish:
    /* In version 2 of the migration protocol, we pass the
     * status code from the sender to the destination host,
     * so it can do any cleanup if the migration failed.
     */
    dname = dname ? dname : vm->def->name;
    VIR_DEBUG("Finish2 %p ret=%d", dconn, ret);
    qemuDomainObjEnterRemote(vm);
    ddomain = dconn->driver->domainMigrateFinish2
        (dconn, dname, cookie, cookielen,
         uri_out ? uri_out : dconnuri, destflags, cancelled);
    /* The domain is already gone at this point */
    ignore_value(qemuDomainObjExitRemote(vm, false));
    if (cancelled && ddomain)
        VIR_ERROR(_("finish step ignored that migration was cancelled"));

 cleanup:
    if (ddomain) {
        virObjectUnref(ddomain);
        ret = 0;
    } else {
        ret = -1;
    }

    virObjectUnref(st);

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    VIR_FREE(uri_out);
    VIR_FREE(cookie);

    return ret;
}


/* This is essentially a re-impl of virDomainMigrateVersion3
 * from libvirt.c, but running in source libvirtd context,
 * instead of client app context & also adding in tunnel
 * handling */
static int
qemuMigrationSrcPerformPeer2Peer3(virQEMUDriverPtr driver,
                                  virConnectPtr sconn,
                                  virConnectPtr dconn,
                                  const char *dconnuri,
                                  virDomainObjPtr vm,
                                  const char *xmlin,
                                  const char *persist_xml,
                                  const char *dname,
                                  const char *uri,
                                  const char *graphicsuri,
                                  const char *listenAddress,
                                  size_t nmigrate_disks,
                                  const char **migrate_disks,
                                  int nbdPort,
                                  qemuMigrationParamsPtr migParams,
                                  unsigned long long bandwidth,
                                  bool useParams,
                                  unsigned long flags)
{
    virDomainPtr ddomain = NULL;
    char *uri_out = NULL;
    char *cookiein = NULL;
    char *cookieout = NULL;
    char *dom_xml = NULL;
    int cookieinlen = 0;
    int cookieoutlen = 0;
    int ret = -1;
    virErrorPtr orig_err = NULL;
    bool cancelled = true;
    virStreamPtr st = NULL;
    unsigned long destflags;
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int maxparams = 0;
    size_t i;
    bool offline = !!(flags & VIR_MIGRATE_OFFLINE);

    VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, dconnuri=%s, vm=%p, xmlin=%s, "
              "dname=%s, uri=%s, graphicsuri=%s, listenAddress=%s, "
              "nmigrate_disks=%zu, migrate_disks=%p, nbdPort=%d, "
              "bandwidth=%llu, useParams=%d, flags=0x%lx",
              driver, sconn, dconn, NULLSTR(dconnuri), vm, NULLSTR(xmlin),
              NULLSTR(dname), NULLSTR(uri), NULLSTR(graphicsuri),
              NULLSTR(listenAddress), nmigrate_disks, migrate_disks, nbdPort,
              bandwidth, useParams, flags);

    /* Unlike the virDomainMigrateVersion3 counterpart, we don't need
     * to worry about auto-setting the VIR_MIGRATE_CHANGE_PROTECTION
     * bit here, because we are already running inside the context of
     * a single job.  */

    dom_xml = qemuMigrationSrcBeginPhase(driver, vm, xmlin, dname,
                                         &cookieout, &cookieoutlen,
                                         nmigrate_disks, migrate_disks, flags);
    if (!dom_xml)
        goto cleanup;

    if (useParams) {
        if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_DEST_XML, dom_xml) < 0)
            goto cleanup;

        if (dname &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_DEST_NAME, dname) < 0)
            goto cleanup;

        if (uri &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_URI, uri) < 0)
            goto cleanup;

        if (bandwidth &&
            virTypedParamsAddULLong(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_BANDWIDTH,
                                    bandwidth) < 0)
            goto cleanup;

        if (graphicsuri &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_GRAPHICS_URI,
                                    graphicsuri) < 0)
            goto cleanup;
        if (listenAddress &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_LISTEN_ADDRESS,
                                    listenAddress) < 0)
            goto cleanup;
        for (i = 0; i < nmigrate_disks; i++)
            if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                        VIR_MIGRATE_PARAM_MIGRATE_DISKS,
                                        migrate_disks[i]) < 0)
                goto cleanup;
        if (nbdPort &&
            virTypedParamsAddInt(&params, &nparams, &maxparams,
                                 VIR_MIGRATE_PARAM_DISKS_PORT,
                                 nbdPort) < 0)
            goto cleanup;

        if (qemuMigrationParamsDump(migParams, &params, &nparams,
                                    &maxparams, &flags) < 0)
            goto cleanup;
    }

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
        flags |= VIR_MIGRATE_PAUSED;

    destflags = flags & ~(VIR_MIGRATE_ABORT_ON_ERROR |
                          VIR_MIGRATE_AUTO_CONVERGE);

    VIR_DEBUG("Prepare3 %p", dconn);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;
    if (flags & VIR_MIGRATE_TUNNELLED) {
        if (!(st = virStreamNew(dconn, 0)))
            goto cleanup;

        qemuDomainObjEnterRemote(vm);
        if (useParams) {
            ret = dconn->driver->domainMigratePrepareTunnel3Params
                (dconn, st, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, destflags);
        } else {
            ret = dconn->driver->domainMigratePrepareTunnel3
                (dconn, st, cookiein, cookieinlen, &cookieout, &cookieoutlen,
                 destflags, dname, bandwidth, dom_xml);
        }
        if (qemuDomainObjExitRemote(vm, !offline) < 0)
            goto cleanup;
    } else {
        qemuDomainObjEnterRemote(vm);
        if (useParams) {
            ret = dconn->driver->domainMigratePrepare3Params
                (dconn, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, &uri_out, destflags);
        } else {
            ret = dconn->driver->domainMigratePrepare3
                (dconn, cookiein, cookieinlen, &cookieout, &cookieoutlen,
                 uri, &uri_out, destflags, dname, bandwidth, dom_xml);
        }
        if (qemuDomainObjExitRemote(vm, !offline) < 0)
            goto cleanup;
    }
    VIR_FREE(dom_xml);
    if (ret == -1)
        goto cleanup;

    if (offline) {
        VIR_DEBUG("Offline migration, skipping Perform phase");
        VIR_FREE(cookieout);
        cookieoutlen = 0;
        cancelled = false;
        goto finish;
    }

    if (uri_out) {
        uri = uri_out;
        if (useParams &&
            virTypedParamsReplaceString(&params, &nparams,
                                        VIR_MIGRATE_PARAM_URI, uri_out) < 0) {
            orig_err = virSaveLastError();
            goto finish;
        }
    } else if (!uri && !(flags & VIR_MIGRATE_TUNNELLED)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domainMigratePrepare3 did not set uri"));
        orig_err = virSaveLastError();
        goto finish;
    }

    /* Perform the migration.  The driver isn't supposed to return
     * until the migration is complete. The src VM should remain
     * running, but in paused state until the destination can
     * confirm migration completion.
     */
    VIR_DEBUG("Perform3 %p uri=%s", sconn, NULLSTR(uri));
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
    VIR_FREE(cookiein);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;
    if (flags & VIR_MIGRATE_TUNNELLED) {
        ret = qemuMigrationSrcPerformTunnel(driver, vm, st, persist_xml,
                                            cookiein, cookieinlen,
                                            &cookieout, &cookieoutlen,
                                            flags, bandwidth, dconn, graphicsuri,
                                            nmigrate_disks, migrate_disks,
                                            migParams);
    } else {
        ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri,
                                            cookiein, cookieinlen,
                                            &cookieout, &cookieoutlen,
                                            flags, bandwidth, dconn, graphicsuri,
                                            nmigrate_disks, migrate_disks,
                                            migParams);
    }

    /* Perform failed. Make sure Finish doesn't overwrite the error */
    if (ret < 0) {
        orig_err = virSaveLastError();
    } else {
        qemuMigrationJobSetPhase(driver, vm,
                                 QEMU_MIGRATION_PHASE_PERFORM3_DONE);
    }

    /* If Perform returns < 0, then we need to cancel the VM
     * startup on the destination
     */
    cancelled = ret < 0;

 finish:
    /*
     * The status code from the source is passed to the destination.
     * The dest can clean up if the source indicated it failed to
     * send all migration data. Returns NULL for ddomain if
     * the dest was unable to complete migration.
     */
    VIR_DEBUG("Finish3 %p ret=%d", dconn, ret);
    VIR_FREE(cookiein);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;

    if (useParams) {
        if (virTypedParamsGetString(params, nparams,
                                    VIR_MIGRATE_PARAM_DEST_NAME, NULL) <= 0 &&
            virTypedParamsReplaceString(&params, &nparams,
                                        VIR_MIGRATE_PARAM_DEST_NAME,
                                        vm->def->name) < 0) {
            ddomain = NULL;
        } else {
            qemuDomainObjEnterRemote(vm);
            ddomain = dconn->driver->domainMigrateFinish3Params
                (dconn, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, destflags, cancelled);
            if (qemuDomainObjExitRemote(vm, !offline) < 0)
                goto cleanup;
        }
    } else {
        dname = dname ? dname : vm->def->name;
        qemuDomainObjEnterRemote(vm);
        ddomain = dconn->driver->domainMigrateFinish3
            (dconn, dname, cookiein, cookieinlen, &cookieout, &cookieoutlen,
             dconnuri, uri, destflags, cancelled);
        if (qemuDomainObjExitRemote(vm, !offline) < 0)
            goto cleanup;
    }

    if (cancelled) {
        if (ddomain) {
            VIR_ERROR(_("finish step ignored that migration was cancelled"));
        } else {
            /* If Finish reported a useful error, use it instead of the
             * original "migration unexpectedly failed" error.
             *
             * This is ugly but we can't do better with the APIs we have. We
             * only replace the error if Finish was called with cancelled == 1
             * and reported a real error (old libvirt would report an error
             * from RPC instead of MIGRATE_FINISH_OK), which only happens when
             * the domain died on destination. To further reduce a possibility
             * of false positives we also check that Perform returned
             * VIR_ERR_OPERATION_FAILED.
             */
            if (orig_err &&
                orig_err->domain == VIR_FROM_QEMU &&
                orig_err->code == VIR_ERR_OPERATION_FAILED) {
                virErrorPtr err = virGetLastError();
                if (err &&
                    err->domain == VIR_FROM_QEMU &&
                    err->code != VIR_ERR_MIGRATE_FINISH_OK) {
                    virFreeError(orig_err);
                    orig_err = NULL;
                }
            }
        }
    }

    /* If ddomain is NULL, then we were unable to start
     * the guest on the target, and must restart on the
     * source. There is a small chance that the ddomain
     * is NULL due to an RPC failure, in which case
     * ddomain could in fact be running on the dest.
     * The lock manager plugins should take care of
     * safety in this scenario.
     */
    cancelled = ddomain == NULL;

    /* If finish3 set an error, and we don't have an earlier
     * one we need to preserve it in case confirm3 overwrites
     */
    if (!orig_err)
        orig_err = virSaveLastError();

    /*
     * If cancelled, then src VM will be restarted, else
     * it will be killed
     */
    VIR_DEBUG("Confirm3 %p cancelled=%d vm=%p", sconn, cancelled, vm);
    VIR_FREE(cookiein);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;
    ret = qemuMigrationSrcConfirmPhase(driver, vm,
                                       cookiein, cookieinlen,
                                       flags, cancelled);
    /* If Confirm3 returns -1, there's nothing more we can
     * do, but fortunately worst case is that there is a
     * domain left in 'paused' state on source.
     */
    if (ret < 0)
        VIR_WARN("Guest %s probably left in 'paused' state on source",
                 vm->def->name);

 cleanup:
    if (ddomain) {
        virObjectUnref(ddomain);
        ret = 0;
    } else {
        ret = -1;
    }

    virObjectUnref(st);

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    VIR_FREE(uri_out);
    VIR_FREE(cookiein);
    VIR_FREE(cookieout);
    virTypedParamsFree(params, nparams);
    return ret;
}


static void
qemuMigrationSrcConnectionClosed(virConnectPtr conn,
                                 int reason,
                                 void *opaque)
{
    virDomainObjPtr vm = opaque;

    VIR_DEBUG("conn=%p, reason=%d, vm=%s", conn, reason, vm->def->name);
    virDomainObjBroadcast(vm);
}


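/* Credential types we can handle when opening the connection to the
 * destination libvirtd for peer-to-peer migration. */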
static int virConnectCredType[] = {
    VIR_CRED_AUTHNAME,
    VIR_CRED_PASSPHRASE,
};


static virConnectAuth virConnectAuthConfig = {
    .credtype = virConnectCredType,
    .ncredtype = ARRAY_CARDINALITY(virConnectCredType),
};


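/* qemuMigrationSrcPerformPeer2Peer:
 *
 * Open the connection to the destination libvirtd and choose which
 * migration protocol to drive (v2, v3 or v3 with typed parameters) based
 * on the features the destination driver advertises.
 */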
static int
qemuMigrationSrcPerformPeer2Peer(virQEMUDriverPtr driver,
                                 virConnectPtr sconn,
                                 virDomainObjPtr vm,
                                 const char *xmlin,
                                 const char *persist_xml,
                                 const char *dconnuri,
                                 const char *uri,
                                 const char *graphicsuri,
                                 const char *listenAddress,
                                 size_t nmigrate_disks,
                                 const char **migrate_disks,
                                 int nbdPort,
                                 qemuMigrationParamsPtr migParams,
                                 unsigned long flags,
                                 const char *dname,
                                 unsigned long resource,
                                 bool *v3proto)
{
    int ret = -1;
    virConnectPtr dconn = NULL;
    bool p2p;
    virErrorPtr orig_err = NULL;
    bool offline = !!(flags & VIR_MIGRATE_OFFLINE);
    bool dstOffline = false;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    bool useParams;

    VIR_DEBUG("driver=%p, sconn=%p, vm=%p, xmlin=%s, dconnuri=%s, uri=%s, "
              "graphicsuri=%s, listenAddress=%s, nmigrate_disks=%zu, "
              "migrate_disks=%p, nbdPort=%d, flags=0x%lx, dname=%s, "
              "resource=%lu",
              driver, sconn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
              NULLSTR(uri), NULLSTR(graphicsuri), NULLSTR(listenAddress),
              nmigrate_disks, migrate_disks, nbdPort, flags, NULLSTR(dname),
              resource);

    if (flags & VIR_MIGRATE_TUNNELLED && uri) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("migration URI is not supported by tunnelled "
                         "migration"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_TUNNELLED && listenAddress) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("listen address is not supported by tunnelled "
                         "migration"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_TUNNELLED && nbdPort) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("disk port address is not supported by tunnelled "
                         "migration"));
        goto cleanup;
    }

    /* The order of operations is important here; we make sure the
     * destination side is completely set up before we touch the source.
     */

    qemuDomainObjEnterRemote(vm);
    dconn = virConnectOpenAuth(dconnuri, &virConnectAuthConfig, 0);
    if (qemuDomainObjExitRemote(vm, !offline) < 0)
        goto cleanup;

    if (dconn == NULL) {
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("Failed to connect to remote libvirt URI %s: %s"),
                       dconnuri, virGetLastErrorMessage());
        virObjectUnref(cfg);
        return -1;
    }

    if (virConnectSetKeepAlive(dconn, cfg->keepAliveInterval,
                               cfg->keepAliveCount) < 0)
        goto cleanup;

    if (virConnectRegisterCloseCallback(dconn, qemuMigrationSrcConnectionClosed,
                                        vm, NULL) < 0) {
        goto cleanup;
    }

    qemuDomainObjEnterRemote(vm);
    p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                   VIR_DRV_FEATURE_MIGRATION_P2P);
    /* v3proto reflects whether the caller used Perform3, but with
     * p2p migrate, regardless of whether Perform2 or Perform3
     * were used, we decide protocol based on what the target supports.
     */
    *v3proto = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                        VIR_DRV_FEATURE_MIGRATION_V3);
    useParams = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                         VIR_DRV_FEATURE_MIGRATION_PARAMS);
    if (offline)
        dstOffline = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                              VIR_DRV_FEATURE_MIGRATION_OFFLINE);
    if (qemuDomainObjExitRemote(vm, !offline) < 0)
        goto cleanup;

    if (!p2p) {
        virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                       _("Destination libvirt does not support peer-to-peer migration protocol"));
        goto cleanup;
    }

    /* Only xmlin, dname, uri, and bandwidth parameters can be used with
     * old-style APIs. */
    if (!useParams && (graphicsuri || listenAddress || nmigrate_disks)) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("Migration APIs with extensible parameters are not "
                         "supported but extended parameters were passed"));
        goto cleanup;
    }

    if (offline && !dstOffline) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("offline migration is not supported by "
                         "the destination host"));
        goto cleanup;
    }

    /* Change protection is only required on the source side (us), and
     * only for v3 migration when begin and perform are separate jobs.
     * But peer-2-peer is already a single job, and we still want to
     * talk to older destinations that would reject the flag.
     * Therefore it is safe to clear the bit here.  */
    flags &= ~VIR_MIGRATE_CHANGE_PROTECTION;

    if (*v3proto) {
        ret = qemuMigrationSrcPerformPeer2Peer3(driver, sconn, dconn, dconnuri, vm, xmlin,
                                                persist_xml, dname, uri, graphicsuri,
                                                listenAddress, nmigrate_disks, migrate_disks,
                                                nbdPort, migParams, resource,
                                                useParams, flags);
    } else {
        ret = qemuMigrationSrcPerformPeer2Peer2(driver, sconn, dconn, vm,
                                                dconnuri, flags, dname, resource,
                                                migParams);
    }

 cleanup:
    orig_err = virSaveLastError();
    qemuDomainObjEnterRemote(vm);
    virConnectUnregisterCloseCallback(dconn, qemuMigrationSrcConnectionClosed);
    virObjectUnref(dconn);
    ignore_value(qemuDomainObjExitRemote(vm, false));
    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    virObjectUnref(cfg);
    return ret;
}


/*
 * This implements perform part of the migration protocol when migration job
 * does not need to be active across several APIs, i.e., peer2peer migration or
 * perform phase of v2 non-peer2peer migration.
 */
static int
qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
                           virConnectPtr conn,
                           virDomainObjPtr vm,
                           const char *xmlin,
                           const char *persist_xml,
                           const char *dconnuri,
                           const char *uri,
                           const char *graphicsuri,
                           const char *listenAddress,
                           size_t nmigrate_disks,
                           const char **migrate_disks,
                           int nbdPort,
                           qemuMigrationParamsPtr migParams,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           unsigned long flags,
                           const char *dname,
                           unsigned long resource,
                           bool v3proto)
{
    virObjectEventPtr event = NULL;
    int ret = -1;
    virErrorPtr orig_err = NULL;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                              flags) < 0)
        goto cleanup;

    if (!(flags & VIR_MIGRATE_OFFLINE) && virDomainObjCheckActive(vm) < 0)
        goto endjob;

    if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
        goto endjob;

    if (!(flags & (VIR_MIGRATE_UNSAFE | VIR_MIGRATE_OFFLINE)) &&
        !qemuMigrationSrcIsSafe(vm->def, nmigrate_disks, migrate_disks, flags))
        goto endjob;

    qemuMigrationSrcStoreDomainState(vm);

    if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
        ret = qemuMigrationSrcPerformPeer2Peer(driver, conn, vm, xmlin, persist_xml,
                                               dconnuri, uri, graphicsuri, listenAddress,
                                               nmigrate_disks, migrate_disks, nbdPort,
                                               migParams, flags, dname, resource,
                                               &v3proto);
    } else {
        qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
        ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen,
                                            cookieout, cookieoutlen,
                                            flags, resource, NULL, NULL, 0, NULL,
                                            migParams);
    }
    if (ret < 0)
        goto endjob;

    /*
     * In v3 protocol, the source VM is not killed off until the
     * confirm step.
     */
    if (!v3proto) {
        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
                        QEMU_ASYNC_JOB_MIGRATION_OUT,
                        VIR_QEMU_PROCESS_STOP_MIGRATED);
        virDomainAuditStop(vm, "migrated");
        event = virDomainEventLifecycleNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
    }

 endjob:
    if (ret < 0)
        orig_err = virSaveLastError();

    /* v2 proto has no confirm phase so we need to reset migration parameters
     * here
     */
    if (!v3proto && ret < 0)
        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 priv->job.migParams, priv->job.apiFlags);

    qemuMigrationSrcRestoreDomainState(driver, vm);

    qemuMigrationJobFinish(driver, vm);
    if (!virDomainObjIsActive(vm) && ret == 0) {
        if (flags & VIR_MIGRATE_UNDEFINE_SOURCE) {
            virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
            vm->persistent = 0;
        }
        qemuDomainRemoveInactiveJob(driver, vm);
    }

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

 cleanup:
    virDomainObjEndAPI(&vm);
    virObjectEventStateQueue(driver->domainEventState, event);
    virObjectUnref(cfg);
    return ret;
}

/*
 * This implements perform phase of v3 migration protocol.
 */
static int
qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
                             virConnectPtr conn,
                             virDomainObjPtr vm,
                             const char *persist_xml,
                             const char *uri,
                             const char *graphicsuri,
                             size_t nmigrate_disks,
                             const char **migrate_disks,
                             qemuMigrationParamsPtr migParams,
                             const char *cookiein,
                             int cookieinlen,
                             char **cookieout,
                             int *cookieoutlen,
                             unsigned long flags,
                             unsigned long resource)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;

    /* If we didn't start the job in the begin phase, start it now. */
    if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                  flags) < 0)
            goto cleanup;
    } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
        goto cleanup;
    }

    qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
    virCloseCallbacksUnset(driver->closeCallbacks, vm,
                           qemuMigrationSrcCleanup);

    ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen,
                                        cookieout, cookieoutlen,
                                        flags, resource, NULL, graphicsuri,
                                        nmigrate_disks, migrate_disks, migParams);

    if (ret < 0) {
        qemuMigrationSrcRestoreDomainState(driver, vm);
        goto endjob;
    }

    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE);

    if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
                             qemuMigrationSrcCleanup) < 0)
        goto endjob;

 endjob:
    if (ret < 0) {
        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 priv->job.migParams, priv->job.apiFlags);
        qemuMigrationJobFinish(driver, vm);
    } else {
        qemuMigrationJobContinue(vm);
    }

    if (!virDomainObjIsActive(vm))
        qemuDomainRemoveInactiveJob(driver, vm);

 cleanup:
    virDomainObjEndAPI(&vm);
    return ret;
}

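/* Entry point for the perform step on the source: peer-to-peer and
 * tunnelled migrations, as well as the v2 protocol, run as a single
 * job via qemuMigrationSrcPerformJob, while plain v3 migrations run
 * only the perform phase via qemuMigrationSrcPerformPhase.
 */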
int
qemuMigrationSrcPerform(virQEMUDriverPtr driver,
                        virConnectPtr conn,
                        virDomainObjPtr vm,
                        const char *xmlin,
                        const char *persist_xml,
                        const char *dconnuri,
                        const char *uri,
                        const char *graphicsuri,
                        const char *listenAddress,
                        size_t nmigrate_disks,
                        const char **migrate_disks,
                        int nbdPort,
                        qemuMigrationParamsPtr migParams,
                        const char *cookiein,
                        int cookieinlen,
                        char **cookieout,
                        int *cookieoutlen,
                        unsigned long flags,
                        const char *dname,
                        unsigned long resource,
                        bool v3proto)
{
    VIR_DEBUG("driver=%p, conn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
              "uri=%s, graphicsuri=%s, listenAddress=%s, "
              "nmigrate_disks=%zu, migrate_disks=%p, nbdPort=%d, "
              "cookiein=%s, cookieinlen=%d, cookieout=%p, cookieoutlen=%p, "
              "flags=0x%lx, dname=%s, resource=%lu, v3proto=%d",
              driver, conn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
              NULLSTR(uri), NULLSTR(graphicsuri), NULLSTR(listenAddress),
              nmigrate_disks, migrate_disks, nbdPort,
              NULLSTR(cookiein), cookieinlen, cookieout, cookieoutlen,
              flags, NULLSTR(dname), resource, v3proto);

    if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
        if (cookieinlen) {
            virReportError(VIR_ERR_OPERATION_INVALID,
                           "%s", _("received unexpected cookie with P2P migration"));
            return -1;
        }

        return qemuMigrationSrcPerformJob(driver, conn, vm, xmlin, persist_xml, dconnuri, uri,
                                          graphicsuri, listenAddress,
                                          nmigrate_disks, migrate_disks, nbdPort,
                                          migParams,
                                          cookiein, cookieinlen,
                                          cookieout, cookieoutlen,
                                          flags, dname, resource, v3proto);
    } else {
        if (dconnuri) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("Unexpected dconnuri parameter with non-peer2peer migration"));
            return -1;
        }

        if (v3proto) {
            return qemuMigrationSrcPerformPhase(driver, conn, vm, persist_xml, uri,
                                                graphicsuri,
                                                nmigrate_disks, migrate_disks,
                                                migParams,
                                                cookiein, cookieinlen,
                                                cookieout, cookieoutlen,
                                                flags, resource);
        } else {
            return qemuMigrationSrcPerformJob(driver, conn, vm, xmlin, persist_xml, NULL,
                                              uri, graphicsuri, listenAddress,
                                              nmigrate_disks, migrate_disks, nbdPort,
                                              migParams,
                                              cookiein, cookieinlen,
                                              cookieout, cookieoutlen, flags,
                                              dname, resource, v3proto);
        }
    }
}

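/* Associate port profiles with all direct (macvtap) interfaces of the
 * incoming domain; on failure the profiles that were already set up
 * are disassociated again.
 */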
static int
qemuMigrationDstVPAssociatePortProfiles(virDomainDefPtr def)
{
    size_t i;
    int last_good_net = -1;
    virDomainNetDefPtr net;

    for (i = 0; i < def->nnets; i++) {
        net = def->nets[i];
        if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
            if (virNetDevVPortProfileAssociate(net->ifname,
                                               virDomainNetGetActualVirtPortProfile(net),
                                               &net->mac,
                                               virDomainNetGetActualDirectDev(net),
                                               -1,
                                               def->uuid,
                                               VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH,
                                               false) < 0) {
                virReportError(VIR_ERR_OPERATION_FAILED,
                               _("Port profile Associate failed for %s"),
                               net->ifname);
                goto err_exit;
            }
            last_good_net = i;
            VIR_DEBUG("Port profile Associate succeeded for %s", net->ifname);

            if (virNetDevMacVLanVPortProfileRegisterCallback(net->ifname, &net->mac,
                                                             virDomainNetGetActualDirectDev(net), def->uuid,
                                                             virDomainNetGetActualVirtPortProfile(net),
                                                             VIR_NETDEV_VPORT_PROFILE_OP_CREATE))
                goto err_exit;
        }
    }

    return 0;

 err_exit:
    for (i = 0; last_good_net != -1 && i <= last_good_net; i++) {
        net = def->nets[i];
        if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
            ignore_value(virNetDevVPortProfileDisassociate(net->ifname,
                                                           virDomainNetGetActualVirtPortProfile(net),
                                                           &net->mac,
                                                           virDomainNetGetActualDirectDev(net),
                                                           -1,
                                                           VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH));
        }
    }
    return -1;
}


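/* Make the incoming domain persistent using the persistent definition
 * carried in the migration cookie; on error the previous definition
 * and persistence state are restored.
 */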
static int
qemuMigrationDstPersist(virQEMUDriverPtr driver,
                        virDomainObjPtr vm,
                        qemuMigrationCookiePtr mig,
                        bool ignoreSaveError)
{
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    virCapsPtr caps = NULL;
    virDomainDefPtr vmdef;
    virDomainDefPtr oldDef = NULL;
    unsigned int oldPersist = vm->persistent;
    virObjectEventPtr event;
    int ret = -1;

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    vm->persistent = 1;
    oldDef = vm->newDef;
    vm->newDef = qemuMigrationCookieGetPersistent(mig);

    if (!(vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm)))
        goto error;

    if (virDomainSaveConfig(cfg->configDir, driver->caps, vmdef) < 0 &&
        !ignoreSaveError)
        goto error;

    event = virDomainEventLifecycleNewFromObj(vm,
                                              VIR_DOMAIN_EVENT_DEFINED,
                                              oldPersist ?
                                              VIR_DOMAIN_EVENT_DEFINED_UPDATED :
                                              VIR_DOMAIN_EVENT_DEFINED_ADDED);
    virObjectEventStateQueue(driver->domainEventState, event);

    ret = 0;

 cleanup:
    virDomainDefFree(oldDef);
    virObjectUnref(caps);
    virObjectUnref(cfg);
    return ret;

 error:
    virDomainDefFree(vm->newDef);
    vm->persistent = oldPersist;
    vm->newDef = oldDef;
    oldDef = NULL;
    goto cleanup;
}


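/* Finish phase on the destination host: depending on @retcode either
 * kill the incoming domain or complete the migration by associating
 * port profiles, persisting the configuration if requested, resuming
 * vCPUs and emitting the appropriate lifecycle events.
 */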
virDomainPtr
qemuMigrationDstFinish(virQEMUDriverPtr driver,
                       virConnectPtr dconn,
                       virDomainObjPtr vm,
                       const char *cookiein,
                       int cookieinlen,
                       char **cookieout,
                       int *cookieoutlen,
                       unsigned long flags,
                       int retcode,
                       bool v3proto)
{
    virDomainPtr dom = NULL;
    qemuMigrationCookiePtr mig = NULL;
    virErrorPtr orig_err = NULL;
    int cookie_flags = 0;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    unsigned short port;
    unsigned long long timeReceived = 0;
    virObjectEventPtr event;
    qemuDomainJobInfoPtr jobInfo = NULL;
    bool inPostCopy = false;
    bool doKill = true;

    VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, retcode=%d",
              driver, dconn, vm, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, retcode);

    port = priv->migrationPort;
    priv->migrationPort = 0;

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) {
        qemuMigrationDstErrorReport(driver, vm->def->name);
        goto cleanup;
    }

    ignore_value(virTimeMillisNow(&timeReceived));

    qemuMigrationJobStartPhase(driver, vm,
                               v3proto ? QEMU_MIGRATION_PHASE_FINISH3
                                       : QEMU_MIGRATION_PHASE_FINISH2);

    qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup);
    VIR_FREE(priv->job.completed);

    cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK |
                   QEMU_MIGRATION_COOKIE_STATS |
                   QEMU_MIGRATION_COOKIE_NBD;
    /* Some older versions of libvirt always send persistent XML in the cookie
     * even though VIR_MIGRATE_PERSIST_DEST was not used. */
    cookie_flags |= QEMU_MIGRATION_COOKIE_PERSISTENT;

    if (!(mig = qemuMigrationEatCookie(driver, vm->def, priv->origname, priv,
                                       cookiein, cookieinlen, cookie_flags)))
        goto endjob;

    if (flags & VIR_MIGRATE_OFFLINE) {
        if (retcode == 0 &&
            qemuMigrationDstPersist(driver, vm, mig, false) == 0)
            dom = virGetDomain(dconn, vm->def->name, vm->def->uuid, -1);
        goto endjob;
    }

    if (retcode != 0) {
        /* Check for a possible error on the monitor in case Finish was called
         * earlier than monitor EOF handler got a chance to process the error
         */
        qemuDomainCheckMonitor(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
        goto endjob;
    }

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        qemuMigrationDstErrorReport(driver, vm->def->name);
        goto endjob;
    }

    if (qemuMigrationDstVPAssociatePortProfiles(vm->def) < 0)
        goto endjob;

    if (mig->network && qemuMigrationDstOPDRelocate(driver, vm, mig) < 0)
        VIR_WARN("unable to provide network data for relocation");

    if (qemuMigrationDstStopNBDServer(driver, vm, mig) < 0)
        goto endjob;

    if (qemuRefreshVirtioChannelState(driver, vm,
                                      QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        goto endjob;

    if (qemuConnectAgent(driver, vm) < 0)
        goto endjob;

    if (flags & VIR_MIGRATE_PERSIST_DEST) {
        if (qemuMigrationDstPersist(driver, vm, mig, !v3proto) < 0) {
            /* Hmpf.  Migration was successful, but making it persistent
             * was not.  If we report successful, then when this domain
             * shuts down, management tools are in for a surprise.  On the
             * other hand, if we report failure, then the management tools
             * might try to restart the domain on the source side, even
             * though the domain is actually running on the destination.
             * Pretend success and hope that this is a rare situation and
             * management tools are smart.
             *
             * However, in v3 protocol, the source VM is still available
             * to restart during confirm() step, so we kill it off now.
             */
            if (v3proto)
                goto endjob;
        }
    }

    /* We need to wait for QEMU to process all data sent by the source
     * before starting guest CPUs.
     */
    if (qemuMigrationDstWaitForCompletion(driver, vm,
                                          QEMU_ASYNC_JOB_MIGRATION_IN,
                                          !!(flags & VIR_MIGRATE_POSTCOPY)) < 0) {
        /* There's not much we can do for v2 protocol since the
         * original domain on the source host is already gone.
         */
        if (v3proto)
            goto endjob;
    }

    /* Now that the state data was transferred we can refresh the actual state
     * of the devices */
    if (qemuProcessRefreshState(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
        /* Similarly to the case above v2 protocol will not be able to recover
         * from this. Let's ignore this and perhaps stuff will not break. */
        if (v3proto)
            goto endjob;
    }

    if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY)
        inPostCopy = true;

    if (!(flags & VIR_MIGRATE_PAUSED)) {
        /* run 'cont' on the destination, which allows migration on qemu
         * >= 0.10.6 to work properly.  This isn't strictly necessary on
         * older qemu's, but it also doesn't hurt anything there
         */
        if (qemuProcessStartCPUs(driver, vm,
                                 inPostCopy ? VIR_DOMAIN_RUNNING_POSTCOPY
                                            : VIR_DOMAIN_RUNNING_MIGRATED,
                                 QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
            if (virGetLastErrorCode() == VIR_ERR_OK)
                virReportError(VIR_ERR_INTERNAL_ERROR,
                               "%s", _("resume operation failed"));
            /* Need to save the current error, in case shutting
             * down the process overwrites it
             */
            orig_err = virSaveLastError();

            /*
             * In v3 protocol, the source VM is still available to
             * restart during confirm() step, so we kill it off
             * now.
             * In v2 protocol, the source is dead, so we leave
             * target in paused state, in case admin can fix
             * things up.
             */
            if (v3proto)
                goto endjob;
        }

        if (inPostCopy)
            doKill = false;
    }

    if (mig->jobInfo) {
        jobInfo = mig->jobInfo;
        mig->jobInfo = NULL;

        if (jobInfo->sent && timeReceived) {
            jobInfo->timeDelta = timeReceived - jobInfo->sent;
            jobInfo->received = timeReceived;
            jobInfo->timeDeltaSet = true;
        }
        qemuDomainJobInfoUpdateTime(jobInfo);
        qemuDomainJobInfoUpdateDowntime(jobInfo);
    }

    if (inPostCopy) {
        if (qemuMigrationDstWaitForCompletion(driver, vm,
                                              QEMU_ASYNC_JOB_MIGRATION_IN,
                                              false) < 0) {
            goto endjob;
        }
        if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
            virDomainObjSetState(vm,
                                 VIR_DOMAIN_RUNNING,
                                 VIR_DOMAIN_RUNNING_MIGRATED);
        }
    }

    dom = virGetDomain(dconn, vm->def->name, vm->def->uuid, vm->def->id);

    if (inPostCopy) {
        /* The only RESUME event during post-copy migration is triggered by
         * QEMU when the running domain moves from the source to the
         * destination host, but then the migration keeps running until all
         * modified memory is transferred from the source host. This will
         * result in VIR_DOMAIN_EVENT_RESUMED with RESUMED_POSTCOPY detail.
         * However, our API documentation says we need to fire another RESUMED
         * event at the very end of migration with RESUMED_MIGRATED detail.
         */
        event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_RESUMED,
                                                  VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
        virObjectEventStateQueue(driver->domainEventState, event);
    }

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER);
        event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_SUSPENDED,
                                                  VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
        virObjectEventStateQueue(driver->domainEventState, event);
    }

    if (virDomainObjIsActive(vm) &&
        virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
        VIR_WARN("Failed to save status on vm %s", vm->def->name);

    /* Guest is successfully running, so cancel previous auto destroy */
    qemuProcessAutoDestroyRemove(driver, vm);

 endjob:
    if (!dom &&
        !(flags & VIR_MIGRATE_OFFLINE) &&
        virDomainObjIsActive(vm)) {
        if (doKill) {
            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                            QEMU_ASYNC_JOB_MIGRATION_IN,
                            VIR_QEMU_PROCESS_STOP_MIGRATED);
            virDomainAuditStop(vm, "failed");
            event = virDomainEventLifecycleNewFromObj(vm,
                                VIR_DOMAIN_EVENT_STOPPED,
                                VIR_DOMAIN_EVENT_STOPPED_FAILED);
            virObjectEventStateQueue(driver->domainEventState, event);
        } else {
            qemuMigrationAnyPostcopyFailed(driver, vm);
        }
    }

    if (dom) {
        if (jobInfo) {
            VIR_STEAL_PTR(priv->job.completed, jobInfo);
            priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
            priv->job.completed->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
        }

        if (qemuMigrationBakeCookie(mig, driver, vm,
                                    QEMU_MIGRATION_DESTINATION,
                                    cookieout, cookieoutlen,
                                    QEMU_MIGRATION_COOKIE_STATS) < 0)
            VIR_WARN("Unable to encode migration cookie");

        /* Remove completed stats for post-copy, everything but timing fields
         * is obsolete anyway.
         */
        if (inPostCopy)
            VIR_FREE(priv->job.completed);
    }

    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                             priv->job.migParams, priv->job.apiFlags);

    qemuMigrationJobFinish(driver, vm);
    if (!virDomainObjIsActive(vm))
        qemuDomainRemoveInactiveJob(driver, vm);

 cleanup:
    VIR_FREE(jobInfo);
    virPortAllocatorRelease(port);
    if (priv->mon)
        qemuMonitorSetDomainLog(priv->mon, NULL, NULL, NULL);
    VIR_FREE(priv->origname);
    virDomainObjEndAPI(&vm);
    qemuMigrationCookieFree(mig);
    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    virObjectUnref(cfg);

    /* Set a special error if Finish is expected to return NULL as a result of
     * successful call with retcode != 0
     */
    if (retcode != 0 && !dom && virGetLastErrorCode() == VIR_ERR_OK)
        virReportError(VIR_ERR_MIGRATE_FINISH_OK, NULL);
    return dom;
}


/* Helper function called while vm is active.  */
int
qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
                       int fd,
                       const char *compressor,
                       qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int rc;
    int ret = -1;
    virCommandPtr cmd = NULL;
    int pipeFD[2] = { -1, -1 };
    unsigned long saveMigBandwidth = priv->migMaxBandwidth;
    char *errbuf = NULL;
    virErrorPtr orig_err = NULL;

    /* Increase migration bandwidth to unlimited since target is a file.
     * Failure to change migration speed is not fatal. */
    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
        qemuMonitorSetMigrationSpeed(priv->mon,
                                     QEMU_DOMAIN_MIG_BANDWIDTH_MAX);
        priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
        if (qemuDomainObjExitMonitor(driver, vm) < 0)
            return -1;
    }

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        /* nothing to tear down */
        return -1;
    }

    if (compressor && pipe(pipeFD) < 0) {
        virReportSystemError(errno, "%s",
                             _("Failed to create pipe for migration"));
        return -1;
    }

    /* All right! We can use fd migration, which means that qemu
     * doesn't have to open() the file, so while we still have to
     * grant SELinux access, we can do it on fd and avoid cleanup
     * later, as well as skip futzing with cgroup.  */
    if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def,
                                    compressor ? pipeFD[1] : fd) < 0)
        goto cleanup;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        goto cleanup;

    if (!compressor) {
        rc = qemuMonitorMigrateToFd(priv->mon,
                                    QEMU_MONITOR_MIGRATE_BACKGROUND,
                                    fd);
    } else {
        const char *prog = compressor;
        const char *args[] = {
            prog,
            "-c",
            NULL
        };

        cmd = virCommandNewArgs(args);
        virCommandSetInputFD(cmd, pipeFD[0]);
        virCommandSetOutputFD(cmd, &fd);
        virCommandSetErrorBuffer(cmd, &errbuf);
        virCommandDoAsyncIO(cmd);
        if (virSetCloseExec(pipeFD[1]) < 0) {
            virReportSystemError(errno, "%s",
                                 _("Unable to set cloexec flag"));
            ignore_value(qemuDomainObjExitMonitor(driver, vm));
            goto cleanup;
        }
        if (virCommandRunAsync(cmd, NULL) < 0) {
            ignore_value(qemuDomainObjExitMonitor(driver, vm));
            goto cleanup;
        }
        rc = qemuMonitorMigrateToFd(priv->mon,
                                    QEMU_MONITOR_MIGRATE_BACKGROUND,
                                    pipeFD[1]);
        if (VIR_CLOSE(pipeFD[0]) < 0 ||
            VIR_CLOSE(pipeFD[1]) < 0)
            VIR_WARN("failed to close intermediate pipe");
    }
    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        goto cleanup;
    if (rc < 0)
        goto cleanup;

    rc = qemuMigrationSrcWaitForCompletion(driver, vm, asyncJob, NULL, 0);

    if (rc < 0) {
        if (rc == -2) {
            orig_err = virSaveLastError();
            virCommandAbort(cmd);
            if (virDomainObjIsActive(vm) &&
                qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
                qemuMonitorMigrateCancel(priv->mon);
                ignore_value(qemuDomainObjExitMonitor(driver, vm));
            }
        }
        goto cleanup;
    }

    if (cmd && virCommandWait(cmd, NULL) < 0)
        goto cleanup;

    qemuDomainEventEmitJobCompleted(driver, vm);
    ret = 0;

 cleanup:
    if (ret < 0 && !orig_err)
        orig_err = virSaveLastError();

    /* Restore max migration bandwidth */
    if (virDomainObjIsActive(vm) &&
        qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
        qemuMonitorSetMigrationSpeed(priv->mon, saveMigBandwidth);
        priv->migMaxBandwidth = saveMigBandwidth;
        ignore_value(qemuDomainObjExitMonitor(driver, vm));
    }

    VIR_FORCE_CLOSE(pipeFD[0]);
    VIR_FORCE_CLOSE(pipeFD[1]);
    if (cmd) {
        VIR_DEBUG("Compression binary stderr: %s", NULLSTR(errbuf));
        VIR_FREE(errbuf);
        virCommandFree(cmd);
    }

    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

    return ret;
}


int
qemuMigrationSrcCancel(virQEMUDriverPtr driver,
                       virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virHashTablePtr blockJobs = NULL;
    bool storage = false;
    size_t i;
    int ret = -1;

    VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
              vm->def->name);

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        if (QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating) {
            qemuBlockJobSyncBeginDisk(disk);
            storage = true;
        }
    }

    qemuDomainObjEnterMonitor(driver, vm);

    ignore_value(qemuMonitorMigrateCancel(priv->mon));
    if (storage)
        blockJobs = qemuMonitorGetAllBlockJobInfo(priv->mon);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || (storage && !blockJobs))
        goto endsyncjob;

    if (!storage) {
        ret = 0;
        goto cleanup;
    }

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);

        if (!diskPriv->migrating)
            continue;

        if (virHashLookup(blockJobs, disk->info.alias)) {
            VIR_DEBUG("Drive mirror on disk %s is still running", disk->dst);
        } else {
            VIR_DEBUG("Drive mirror on disk %s is gone", disk->dst);
            qemuBlockJobSyncEndDisk(vm, QEMU_ASYNC_JOB_NONE, disk);
            diskPriv->migrating = false;
        }
    }

    if (qemuMigrationSrcNBDCopyCancel(driver, vm, false,
                                      QEMU_ASYNC_JOB_NONE, NULL) < 0)
        goto endsyncjob;

    ret = 0;

 cleanup:
    virHashFree(blockJobs);
    return ret;

 endsyncjob:
    if (storage) {
        for (i = 0; i < vm->def->ndisks; i++) {
            virDomainDiskDefPtr disk = vm->def->disks[i];
            qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);

            if (diskPriv->migrating) {
                qemuBlockJobSyncEndDisk(vm, QEMU_ASYNC_JOB_NONE, disk);
                diskPriv->migrating = false;
            }
        }
    }
    goto cleanup;
}


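/* Start the migration async job and restrict which regular jobs are
 * allowed to interleave with it while it is active.
 */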
static int
qemuMigrationJobStart(virQEMUDriverPtr driver,
                      virDomainObjPtr vm,
                      qemuDomainAsyncJob job,
                      unsigned long apiFlags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virDomainJobOperation op;
    unsigned long long mask;

    if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
        op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN;
        mask = QEMU_JOB_NONE;
    } else {
        op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT;
        mask = QEMU_JOB_DEFAULT_MASK |
               JOB_MASK(QEMU_JOB_SUSPEND) |
               JOB_MASK(QEMU_JOB_MIGRATION_OP);
    }

    if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0)
        return -1;

    priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;

    qemuDomainObjSetAsyncJobMask(vm, mask);
    return 0;
}

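/* Advance the migration job to @phase; attempts to move the phase
 * backwards are reported and ignored.
 */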
static void
qemuMigrationJobSetPhase(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         qemuMigrationJobPhase phase)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (phase < priv->job.phase) {
        VIR_ERROR(_("migration protocol going backwards %s => %s"),
                  qemuMigrationJobPhaseTypeToString(priv->job.phase),
                  qemuMigrationJobPhaseTypeToString(phase));
        return;
    }

    qemuDomainObjSetJobPhase(driver, vm, phase);
}

static void
qemuMigrationJobStartPhase(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           qemuMigrationJobPhase phase)
{
    qemuMigrationJobSetPhase(driver, vm, phase);
}

static void
qemuMigrationJobContinue(virDomainObjPtr vm)
{
    qemuDomainObjReleaseAsyncJob(vm);
}

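/* Check that @vm is currently running the given migration async job
 * and report an error if it is not.
 */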
static bool
qemuMigrationJobIsActive(virDomainObjPtr vm,
                         qemuDomainAsyncJob job)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (priv->job.asyncJob != job) {
        const char *msg;

        if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
            msg = _("domain '%s' is not processing incoming migration");
        else
            msg = _("domain '%s' is not being migrated");

        virReportError(VIR_ERR_OPERATION_INVALID, msg, vm->def->name);
        return false;
    }
    return true;
}

static void
qemuMigrationJobFinish(virQEMUDriverPtr driver, virDomainObjPtr vm)
{
    qemuDomainObjEndAsyncJob(driver, vm);
}


static void
qemuMigrationDstErrorFree(void *data,
                       const void *name ATTRIBUTE_UNUSED)
{
    virErrorPtr err = data;
    virFreeError(err);
}

int
qemuMigrationDstErrorInit(virQEMUDriverPtr driver)
{
    driver->migrationErrors = virHashAtomicNew(64, qemuMigrationDstErrorFree);
    if (driver->migrationErrors)
        return 0;
    else
        return -1;
}

/**
 * This function consumes @err; the caller should consider the @err pointer
 * invalid after calling this function.
 */
void
qemuMigrationDstErrorSave(virQEMUDriverPtr driver,
                          const char *name,
                          virErrorPtr err)
{
    if (!err)
        return;

    VIR_DEBUG("Saving incoming migration error for domain %s: %s",
              name, err->message);
    if (virHashAtomicUpdate(driver->migrationErrors, name, err) < 0) {
        VIR_WARN("Failed to save migration error for domain '%s'", name);
        virFreeError(err);
    }
}

void
qemuMigrationDstErrorReport(virQEMUDriverPtr driver,
                            const char *name)
{
    virErrorPtr err;

    if (!(err = virHashAtomicSteal(driver->migrationErrors, name)))
        return;

    VIR_DEBUG("Restoring saved incoming migration error for domain %s: %s",
              name, err->message);
    virSetError(err);
    virFreeError(err);
}


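/* Sum up the progress of all NBD drive-mirror jobs used for storage
 * migration into jobInfo->mirrorStats.
 */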
int
qemuMigrationSrcFetchMirrorStats(virQEMUDriverPtr driver,
                                 virDomainObjPtr vm,
                                 qemuDomainAsyncJob asyncJob,
                                 qemuDomainJobInfoPtr jobInfo)
{
    size_t i;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    bool nbd = false;
    virHashTablePtr blockinfo = NULL;
    qemuDomainMirrorStatsPtr stats = &jobInfo->mirrorStats;

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        if (QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating) {
            nbd = true;
            break;
        }
    }

    if (!nbd)
        return 0;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    blockinfo = qemuMonitorGetAllBlockJobInfo(priv->mon);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockinfo)
        return -1;

    memset(stats, 0, sizeof(*stats));

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        qemuMonitorBlockJobInfoPtr data;

        if (!diskPriv->migrating ||
            !(data = virHashLookup(blockinfo, disk->info.alias)))
            continue;

        stats->transferred += data->cur;
        stats->total += data->end;
    }

    virHashFree(blockinfo);
    return 0;
}