migration.c 108.9 KB
Newer Older
A
aliguori 已提交
1 2 3 4 5 6 7 8 9 10 11
/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
12 13
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
A
aliguori 已提交
14 15
 */

P
Peter Maydell 已提交
16
#include "qemu/osdep.h"
17
#include "qemu/cutils.h"
18
#include "qemu/error-report.h"
19
#include "migration/blocker.h"
20
#include "exec.h"
21
#include "fd.h"
22
#include "socket.h"
23
#include "rdma.h"
24
#include "ram.h"
J
Juan Quintela 已提交
25
#include "migration/global_state.h"
26
#include "migration/misc.h"
27
#include "migration.h"
28
#include "savevm.h"
29
#include "qemu-file-channel.h"
J
Juan Quintela 已提交
30
#include "qemu-file.h"
31
#include "migration/vmstate.h"
32
#include "block/block.h"
33
#include "qapi/error.h"
34 35
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-sockets.h"
36 37
#include "qapi/qapi-commands-migration.h"
#include "qapi/qapi-events-migration.h"
38
#include "qapi/qmp/qerror.h"
39
#include "qapi/qmp/qnull.h"
40
#include "qemu/rcu.h"
41
#include "block.h"
42
#include "postcopy-ram.h"
43
#include "qemu/thread.h"
44
#include "trace.h"
45
#include "exec/target_page.h"
46
#include "io/channel-buffer.h"
47
#include "migration/colo.h"
48
#include "hw/boards.h"
P
Peter Xu 已提交
49
#include "monitor/monitor.h"
50
#include "net/announce.h"
51

52
#define MAX_THROTTLE  (32 << 20)      /* Migration transfer speed throttling */
A
aliguori 已提交
53

J
Juan Quintela 已提交
54 55 56 57 58
/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

59 60 61 62
/* Time in milliseconds we are allowed to stop the source,
 * for sending the last part */
#define DEFAULT_MIGRATE_SET_DOWNTIME 300

63 64 65 66
/* Maximum migrate downtime set to 2000 seconds */
#define MAX_MIGRATE_DOWNTIME_SECONDS 2000
#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)

67 68
/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
69 70 71
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression.*/
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
72 73
/*0: means nocompress, 1: best speed, ... 9: best compress ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
74
/* Define default autoconverge cpu throttle migration parameters */
75 76
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10
77
#define DEFAULT_MIGRATE_MAX_CPU_THROTTLE 99
78

79
/* Migration XBZRLE default cache size */
80
#define DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE (64 * 1024 * 1024)
81

82 83
/* The delay time (in ms) between two COLO checkpoints */
#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY (200 * 100)
84
#define DEFAULT_MIGRATE_MULTIFD_CHANNELS 2
85

86 87 88 89 90
/* Background transfer rate for postcopy, 0 means unlimited, note
 * that page requests can still exceed this limit.
 */
#define DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH 0

91 92 93 94 95 96 97 98 99
/*
 * Parameters for self_announce_delay giving a stream of RARP/ARP
 * packets after migration.
 */
#define DEFAULT_MIGRATE_ANNOUNCE_INITIAL  50
#define DEFAULT_MIGRATE_ANNOUNCE_MAX     550
#define DEFAULT_MIGRATE_ANNOUNCE_ROUNDS    5
#define DEFAULT_MIGRATE_ANNOUNCE_STEP    100

100 101 102
static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

D
Dr. David Alan Gilbert 已提交
103 104
static bool deferred_incoming;

105 106 107 108 109 110 111 112
/* Messages sent on the return path from destination to source.
 *
 * NOTE: these values go on the wire between the two QEMU instances;
 * the numeric values must stay stable across versions, so only append
 * new entries before MIG_RP_MSG_MAX, never reorder existing ones.
 */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32 ) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */
    MIG_RP_MSG_RECV_BITMAP,  /* send recved_bitmap back to source */
    MIG_RP_MSG_RESUME_ACK,   /* tell source that we are ready to resume */

    MIG_RP_MSG_MAX
};

119 120 121 122
/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

123
static MigrationState *current_migration;
124
static MigrationIncomingState *current_incoming;
125

126
static bool migration_object_check(MigrationState *ms, Error **errp);
127 128 129
static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state);
130
static void migrate_fd_cancel(MigrationState *s);
131

132 133
/*
 * One-time creation of the global outgoing (current_migration) and
 * incoming (current_incoming) migration state objects.  Called once
 * during QEMU startup; asserts enforce the single-call contract.
 */
void migration_object_init(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    Error *err = NULL;

    /* This can only be called once. */
    assert(!current_migration);
    current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));

    /*
     * Init the migrate incoming object as well no matter whether
     * we'll use it or not.
     */
    assert(!current_incoming);
    current_incoming = g_new0(MigrationIncomingState, 1);
    current_incoming->state = MIGRATION_STATUS_NONE;
    current_incoming->postcopy_remote_fds =
        g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
    qemu_mutex_init(&current_incoming->rp_mutex);
    qemu_event_init(&current_incoming->main_thread_load_event, false);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);

    init_dirty_bitmap_incoming_migration();

    /* Validate capability/parameter combinations; fatal on failure */
    if (!migration_object_check(current_migration, &err)) {
        error_report_err(err);
        exit(1);
    }

    /*
     * We cannot really do this in migration_instance_init() since at
     * that time global properties are not yet applied, then this
     * value will be definitely replaced by something else.
     */
    if (ms->enforce_config_section) {
        current_migration->send_configuration = true;
    }
}

172
/*
 * Called on QEMU shutdown: abort any in-flight outgoing migration and
 * drop the reference taken by migration_object_init().
 */
void migration_shutdown(void)
{
    /*
     * Cancel the current migration - that will (eventually)
     * stop the migration using this structure
     */
    migrate_fd_cancel(current_migration);
    object_unref(OBJECT(current_migration));
}

182
/* For outgoing */
/* Accessor for the global outgoing migration state; never returns NULL. */
MigrationState *migrate_get_current(void)
{
    /* This can only be called after the object created. */
    assert(current_migration);
    return current_migration;
}

190 191
/* Accessor for the global incoming migration state; never returns NULL. */
MigrationIncomingState *migration_incoming_get_current(void)
{
    assert(current_incoming);
    return current_incoming;
}

/*
 * Tear down the incoming migration state once the migration is
 * finished (or failed): close both directions of the stream, free the
 * postcopy fd array and any recorded listen addresses.  The
 * MigrationIncomingState object itself lives for the whole process
 * and is only reset here, not freed.
 */
void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->to_src_file) {
        /* Tell source that we are done */
        migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
        qemu_fclose(mis->to_src_file);
        mis->to_src_file = NULL;
    }

    if (mis->from_src_file) {
        qemu_fclose(mis->from_src_file);
        mis->from_src_file = NULL;
    }
    if (mis->postcopy_remote_fds) {
        g_array_free(mis->postcopy_remote_fds, TRUE);
        mis->postcopy_remote_fds = NULL;
    }

    qemu_event_reset(&mis->main_thread_load_event);

    if (mis->socket_address_list) {
        qapi_free_SocketAddressList(mis->socket_address_list);
        mis->socket_address_list = NULL;
    }
}

224 225 226
/* Emit a MIGRATION QMP event for @new_state, if events are enabled. */
static void migrate_generate_event(int new_state)
{
    if (!migrate_use_events()) {
        return;
    }

    qapi_event_send_migration(new_state);
}

231 232 233 234 235 236 237 238 239 240
/* True if the late-block-activate capability is enabled on the
 * current outgoing migration state. */
static bool migrate_late_block_activate(void)
{
    MigrationState *ms = migrate_get_current();

    return ms->enabled_capabilities[MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE];
}

D
Dr. David Alan Gilbert 已提交
241 242 243 244 245 246 247 248 249 250 251 252 253
/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        /* Second "defer" is an error; the flag is already set */
        error_setg(errp, "Incoming migration already deferred");
        return;
    }

    deferred_incoming = true;
}

254 255 256 257
/*
 * Send a message on the return channel back to the source
 * of the migration.
 *
 * @message_type: one of enum mig_rp_message_type (goes on the wire)
 * @len:          payload length in bytes
 * @data:         payload buffer (may be stack-allocated by the caller)
 *
 * Returns 0 on success, a negative errno if the return-path file is
 * gone or reported an error.  Serialised by mis->rp_mutex.
 */
static int migrate_send_rp_message(MigrationIncomingState *mis,
                                   enum mig_rp_message_type message_type,
                                   uint16_t len, void *data)
{
    int ret = 0;

    trace_migrate_send_rp_message((int)message_type, len);
    qemu_mutex_lock(&mis->rp_mutex);

    /*
     * It's possible that the file handle got lost due to network
     * failures.
     */
    if (!mis->to_src_file) {
        ret = -EIO;
        goto error;
    }

    /* Wire format: be16 type, be16 len, then len raw payload bytes */
    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);

    /* It's possible that qemu file got error during sending */
    ret = qemu_file_get_error(mis->to_src_file);

error:
    qemu_mutex_unlock(&mis->rp_mutex);
    return ret;
}

289 290 291 292 293 294 295
/* Request a range of pages from the source VM at the given
 * start address.
 *   rbname: Name of the RAMBlock to request the page in, if NULL it's the same
 *           as the last request (a name must have been given previously)
 *   Start: Address offset within the RB
 *   Len: Length in bytes required - must be a multiple of pagesize
 */
P
Peter Xu 已提交
296 297
int migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                              ram_addr_t start, size_t len)
298
{
S
Stefan Weil 已提交
299
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
300
    size_t msglen = 12; /* start + len */
P
Peter Xu 已提交
301
    enum mig_rp_message_type msg_type;
302 303 304 305 306 307 308 309 310 311 312

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    if (rbname) {
        int rbname_len = strlen(rbname);
        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
P
Peter Xu 已提交
313
        msg_type = MIG_RP_MSG_REQ_PAGES_ID;
314
    } else {
P
Peter Xu 已提交
315
        msg_type = MIG_RP_MSG_REQ_PAGES;
316
    }
P
Peter Xu 已提交
317 318

    return migrate_send_rp_message(mis, msg_type, msglen, bufc);
319 320
}

321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336
/* Whether the incoming side should run in COLO (fault-tolerance) mode.
 * Set from the migration stream, cleared once COLO is no longer wanted. */
static bool migration_colo_enabled;
bool migration_incoming_colo_enabled(void)
{
    return migration_colo_enabled;
}

void migration_incoming_disable_colo(void)
{
    migration_colo_enabled = false;
}

void migration_incoming_enable_colo(void)
{
    migration_colo_enabled = true;
}

337 338 339 340 341 342 343 344 345 346 347
/*
 * Record a listen address on the incoming state (prepended to the
 * list).  The SocketAddress is deep-copied; the caller keeps
 * ownership of @address.
 */
void migrate_add_address(SocketAddress *address)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    SocketAddressList *entry = g_new0(SocketAddressList, 1);

    entry->value = QAPI_CLONE(SocketAddress, address);
    entry->next = mis->socket_address_list;
    mis->socket_address_list = entry;
}

348
/*
 * Start listening for an incoming migration described by @uri.
 * Dispatches on the URI scheme ("tcp:", "rdma:", "exec:", "unix:",
 * "fd:"), or defers setup entirely for "defer".  Errors (including an
 * unknown scheme) are reported through @errp.
 */
void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    /* Announce SETUP before the transport starts accepting */
    qapi_event_send_migration(MIGRATION_STATUS_SETUP);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

372 373 374 375 376
/*
 * Bottom half run in the main loop after qemu_loadvm_state() has
 * succeeded: re-activate block devices if appropriate, announce
 * ourselves on the network, then either start the VM or set the
 * runstate the source told us about.  Finishes by moving the
 * migration state to COMPLETED and destroying the incoming state.
 */
static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* If capability late_block_activate is set:
     * Only fire up the block code now if we're going to restart the
     * VM, else 'cont' will do it.
     * This causes file locking to happen; so we don't want it to happen
     * unless we really are starting the VM.
     */
    if (!migrate_late_block_activate() ||
         (autostart && (!global_state_received() ||
            global_state_get_runstate() == RUN_STATE_RUNNING))) {
        /* Make sure all file formats flush their mutable metadata.
         * If we get an error here, just don't restart the VM yet. */
        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            local_err = NULL;
            autostart = false;
        }
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self(&mis->announce_timer, migrate_announce_params());

    /* Multifd receive threads are done with the stream by now */
    if (multifd_load_cleanup(&local_err) != 0) {
        error_report_err(local_err);
        autostart = false;
    }
    /* If global state section was not received or we are in running
       state, we need to obey autostart. Any other state is set with
       runstate_set. */

    dirty_bitmap_mig_before_vm_start();

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else if (migration_incoming_colo_enabled()) {
        /* COLO secondary always resumes execution after takeover */
        migration_incoming_disable_colo();
        vm_start();
    } else {
        runstate_set(global_state_get_runstate());
    }
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

436
/*
 * Main incoming-migration coroutine: loads the whole migration stream
 * via qemu_loadvm_state(), then hands off depending on mode:
 *  - postcopy: if the postcopy phase was entered, cleanup belongs to
 *    the postcopy listen thread and we simply return;
 *  - COLO: spawn the COLO incoming thread and yield until it exits;
 *  - plain precopy: schedule process_incoming_migration_bh() to finish
 *    up in the main loop.
 * Any load failure sets MIGRATION_STATUS_FAILED and exits QEMU.
 */
static void process_incoming_migration_co(void *opaque)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;
    Error *local_err = NULL;

    assert(mis->from_src_file);
    mis->migration_incoming_co = qemu_coroutine_self();
    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(mis->from_src_file);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    /* we get COLO info, and know if we are in COLO mode */
    if (!ret && migration_incoming_colo_enabled()) {
        /* Make sure all file formats flush their mutable metadata */
        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            goto fail;
        }

        if (colo_init_ram_cache() < 0) {
            error_report("Init ram cache failed");
            goto fail;
        }

        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
             colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
        mis->have_colo_incoming_thread = true;
        /* Yield control to the COLO thread until checkpointing ends */
        qemu_coroutine_yield();

        /* Wait checkpoint incoming thread exit before free resource */
        qemu_thread_join(&mis->colo_incoming_thread);
        /* We hold the global iothread lock, so it is safe here */
        colo_release_ram_cache();
    }

    if (ret < 0) {
        error_report("load of migration failed: %s", strerror(-ret));
        goto fail;
    }
    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
    mis->migration_incoming_co = NULL;
    return;
fail:
    local_err = NULL;
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    qemu_fclose(mis->from_src_file);
    if (multifd_load_cleanup(&local_err) != 0) {
        error_report_err(local_err);
    }
    /* An incoming migration failure is fatal for the destination QEMU */
    exit(EXIT_FAILURE);
}

516
/*
 * Prepare the incoming state for stream @f: bring up the multifd
 * receive threads (fatal if that fails), record @f as the source
 * channel if none is set yet, and switch it to non-blocking so the
 * load coroutine can yield on it.
 */
static void migration_incoming_setup(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (multifd_load_setup() != 0) {
        /* We haven't been able to create multifd threads
           nothing better to do */
        exit(EXIT_FAILURE);
    }

    if (mis->from_src_file == NULL) {
        mis->from_src_file = f;
    }

    qemu_file_set_blocking(f, false);
}

532
/* Kick off the incoming-migration load coroutine on the main loop. */
void migration_incoming_process(void)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
    qemu_coroutine_enter(co);
}

P
Peter Xu 已提交
538 539
/* Returns true if recovered from a paused migration, otherwise false */
static bool postcopy_try_recover(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
        /* Resumed from a paused postcopy migration */

        mis->from_src_file = f;
        /* Postcopy has standalone thread to do vm load */
        qemu_file_set_blocking(f, true);

        /* Re-configure the return path */
        mis->to_src_file = qemu_file_get_return_path(f);

        migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);

        /*
         * Here, we only wake up the main loading thread (while the
         * fault thread will still be waiting), so that we can receive
         * commands from source now, and answer it if needed. The
         * fault thread will be woken up afterwards until we are sure
         * that source is ready to reply to page requests.
         */
        qemu_sem_post(&mis->postcopy_pause_sem_dst);
        return true;
    }

    return false;
}

/*
 * Entry point for an incoming migration carried on a single QEMUFile.
 * A paused postcopy migration is resumed directly; otherwise do the
 * normal setup and start the load coroutine.
 */
void migration_fd_process_incoming(QEMUFile *f)
{
    if (!postcopy_try_recover(f)) {
        migration_incoming_setup(f);
        migration_incoming_process();
    }
}

580
/*
 * Entry point for each incoming channel (socket/fd based transports).
 * The first channel becomes the main migration stream; with multifd
 * enabled, subsequent channels are handed to the multifd receive code
 * and the load only starts once enough channels have arrived.
 *
 * @errp: set on multifd channel-registration failure.
 */
void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    bool start_migration;

    if (!mis->from_src_file) {
        /* The first connection (multifd may have multiple) */
        QEMUFile *f = qemu_fopen_channel_input(ioc);

        /* If it's a recovery, we're done */
        if (postcopy_try_recover(f)) {
            return;
        }

        migration_incoming_setup(f);

        /*
         * Common migration only needs one channel, so we can start
         * right now.  Multifd needs more than one channel, we wait.
         */
        start_migration = !migrate_use_multifd();
    } else {
        Error *local_err = NULL;
        /* Multiple connections */
        assert(migrate_use_multifd());
        /* Returns true once all multifd channels are connected */
        start_migration = multifd_recv_new_channel(ioc, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if (start_migration) {
        migration_incoming_process();
    }
}

617 618 619 620 621 622 623 624
/**
 * @migration_has_all_channels: We have received all channels that we need
 *
 * Returns true when we have got connections to all the channels that
 * we need for migration.
 */
bool migration_has_all_channels(void)
{
625
    MigrationIncomingState *mis = migration_incoming_get_current();
626 627 628 629
    bool all_channels;

    all_channels = multifd_recv_all_channels_created();

630
    return all_channels && mis->from_src_file != NULL;
631 632
}

633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659
/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t payload = cpu_to_be32(value);

    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(payload), &payload);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t payload = cpu_to_be32(value);

    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(payload), &payload);
}

660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698
/*
 * Send the receive bitmap of @block_name back to the source during
 * postcopy recovery.  The header carries a one-byte name length
 * followed by the name; the bitmap itself is then streamed directly.
 * Only valid in MIGRATION_STATUS_POSTCOPY_RECOVER.
 */
void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name)
{
    char buf[512];
    int len;
    int64_t res;

    /*
     * First, we send the header part. It contains only the len of
     * idstr, and the idstr itself.
     */
    len = strlen(block_name);
    /*
     * The length must fit in the single header byte (and within buf).
     * RAMBlock idstrs are expected to be short, but guard explicitly
     * rather than silently truncating buf[0] / overrunning buf.
     */
    if (len >= 256) {
        error_report("%s: block name too long: %s", __func__, block_name);
        return;
    }
    buf[0] = len;
    memcpy(buf + 1, block_name, len);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
                     __func__);
        return;
    }

    migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);

    /*
     * Next, we dump the received bitmap to the stream.
     *
     * TODO: currently we are safe since we are the only one that is
     * using the to_src_file handle (fault thread is still paused),
     * and it's ok even not taking the mutex. However the best way is
     * to take the lock before sending the message header, and release
     * the lock after sending the bitmap.
     */
    qemu_mutex_lock(&mis->rp_mutex);
    res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
    qemu_mutex_unlock(&mis->rp_mutex);

    trace_migrate_send_rp_recv_bitmap(block_name, res);
}

699 700 701 702 703 704 705 706
/* Send a 'RESUME_ACK' on the return path: tell the source we are
 * ready to resume a paused postcopy migration. */
void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
{
    uint32_t payload = cpu_to_be32(value);

    migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK,
                            sizeof(payload), &payload);
}

O
Orit Wasserman 已提交
707 708 709 710 711 712 713
/*
 * QMP query-migrate-capabilities: build a list with one entry per
 * capability, reporting its current enabled/disabled state.  The
 * block capability is hidden when live block migration is compiled
 * out.  Caller owns and frees the returned list.
 */
MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList **tail = &head;
    MigrationState *s = migrate_get_current();
    int i;

    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
        if (i == MIGRATION_CAPABILITY_BLOCK) {
            continue;
        }
#endif
        MigrationCapabilityStatusList *entry = g_malloc0(sizeof(*entry));

        entry->value = g_malloc(sizeof(*entry->value));
        entry->value->capability = i;
        entry->value->state = s->enabled_capabilities[i];
        /* Append at the tail to preserve capability ordering */
        *tail = entry;
        tail = &entry->next;
    }

    return head;
}

737 738 739 740 741
/*
 * QMP query-migrate-parameters: snapshot every migration parameter
 * into a freshly allocated MigrationParameters.  All has_* flags are
 * set true since every field is populated.  Caller owns the result
 * (including the duplicated TLS strings).
 */
MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    /* TODO use QAPI_CLONE() instead of duplicating it inline */
    params = g_malloc0(sizeof(*params));
    params->has_compress_level = true;
    params->compress_level = s->parameters.compress_level;
    params->has_compress_threads = true;
    params->compress_threads = s->parameters.compress_threads;
    params->has_compress_wait_thread = true;
    params->compress_wait_thread = s->parameters.compress_wait_thread;
    params->has_decompress_threads = true;
    params->decompress_threads = s->parameters.decompress_threads;
    params->has_cpu_throttle_initial = true;
    params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
    params->has_cpu_throttle_increment = true;
    params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
    params->has_tls_creds = true;
    params->tls_creds = g_strdup(s->parameters.tls_creds);
    params->has_tls_hostname = true;
    params->tls_hostname = g_strdup(s->parameters.tls_hostname);
    params->has_tls_authz = true;
    params->tls_authz = g_strdup(s->parameters.tls_authz);
    params->has_max_bandwidth = true;
    params->max_bandwidth = s->parameters.max_bandwidth;
    params->has_downtime_limit = true;
    params->downtime_limit = s->parameters.downtime_limit;
    params->has_x_checkpoint_delay = true;
    params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;
    params->has_block_incremental = true;
    params->block_incremental = s->parameters.block_incremental;
    params->has_multifd_channels = true;
    params->multifd_channels = s->parameters.multifd_channels;
    params->has_xbzrle_cache_size = true;
    params->xbzrle_cache_size = s->parameters.xbzrle_cache_size;
    params->has_max_postcopy_bandwidth = true;
    params->max_postcopy_bandwidth = s->parameters.max_postcopy_bandwidth;
    params->has_max_cpu_throttle = true;
    params->max_cpu_throttle = s->parameters.max_cpu_throttle;
    params->has_announce_initial = true;
    params->announce_initial = s->parameters.announce_initial;
    params->has_announce_max = true;
    params->announce_max = s->parameters.announce_max;
    params->has_announce_rounds = true;
    params->announce_rounds = s->parameters.announce_rounds;
    params->has_announce_step = true;
    params->announce_step = s->parameters.announce_step;

    return params;
}

790 791 792 793 794 795 796 797 798 799 800 801 802 803
AnnounceParameters *migrate_announce_params(void)
{
    static AnnounceParameters ap;

    MigrationState *s = migrate_get_current();

    ap.initial = s->parameters.announce_initial;
    ap.max = s->parameters.announce_max;
    ap.rounds = s->parameters.announce_rounds;
    ap.step = s->parameters.announce_step;

    return &ap;
}

804 805 806 807
/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
808
bool migration_is_setup_or_active(int state)
809 810 811
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
812
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
P
Peter Xu 已提交
813
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
814
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
815
    case MIGRATION_STATUS_SETUP:
816 817
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
818 819 820 821 822 823 824 825
        return true;

    default:
        return false;

    }
}

826 827 828 829
/*
 * Fill the ram section of a MigrationInfo from the global ram/xbzrle/
 * compression counters plus per-migration rates kept on @s.  Also
 * attaches the optional xbzrle, compression and cpu-throttle
 * sub-structures when the corresponding feature is in use.
 */
static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    info->has_ram = true;
    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = ram_counters.transferred;
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = ram_counters.duplicate;
    /* legacy value.  It is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = ram_counters.normal;
    /* normal pages are counted in pages; convert to bytes */
    info->ram->normal_bytes = ram_counters.normal *
        qemu_target_page_size();
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count = ram_counters.dirty_sync_count;
    info->ram->postcopy_requests = ram_counters.postcopy_requests;
    info->ram->page_size = qemu_target_page_size();
    info->ram->multifd_bytes = ram_counters.multifd_bytes;
    info->ram->pages_per_second = s->pages_per_second;

    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_counters.bytes;
        info->xbzrle_cache->pages = xbzrle_counters.pages;
        info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
        info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
        info->xbzrle_cache->overflow = xbzrle_counters.overflow;
    }

    if (migrate_use_compression()) {
        info->has_compression = true;
        info->compression = g_malloc0(sizeof(*info->compression));
        info->compression->pages = compression_counters.pages;
        info->compression->busy = compression_counters.busy;
        info->compression->busy_rate = compression_counters.busy_rate;
        info->compression->compressed_size =
                                    compression_counters.compressed_size;
        info->compression->compression_rate =
                                    compression_counters.compression_rate;
    }

    if (cpu_throttle_active()) {
        info->has_cpu_throttle_percentage = true;
        info->cpu_throttle_percentage = cpu_throttle_get_percentage();
    }

    /* remaining/dirty rate only make sense while still migrating */
    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate = ram_counters.dirty_pages_rate;
    }
}

879 880 881 882 883 884 885 886 887 888 889
/* Attach block-migration statistics to @info, if block migration runs. */
static void populate_disk_info(MigrationInfo *info)
{
    if (!blk_mig_active()) {
        return;
    }
    info->has_disk = true;
    info->disk = g_malloc0(sizeof(*info->disk));
    info->disk->transferred = blk_mig_bytes_transferred();
    info->disk->remaining = blk_mig_bytes_remaining();
    info->disk->total = blk_mig_bytes_total();
}

890
/*
 * Fill in @info with the state of the outgoing (source) side of
 * migration.  If no migration was ever started on this side, leave
 * @info untouched so that a destination-side status is not clobbered.
 *
 * Fix: removed the unreachable "break" that followed "return" in the
 * MIGRATION_STATUS_NONE case (dead code).
 */
static void fill_source_migration_info(MigrationInfo *info)
{
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        /* do not overwrite destination migration status */
        return;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
         /* TODO add some postcopy stats */
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->start_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);
        populate_disk_info(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->has_error_desc = true;
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;
}

953 954 955 956 957 958 959 960 961 962 963 964
/**
 * migrate_caps_check - check capability validity
 *
 * @cap_list: old capability list, array of bool; updated in place with
 *            the new settings so dependent checks see the final state
 * @params: new capabilities to be applied soon
 * @errp: set *errp if the check failed, with reason
 *
 * Returns true if check passed, otherwise false.
 *
 * Fix: the header comment referred to a nonexistent
 * "migration_caps_check"; it now names the actual function.
 */
static bool migrate_caps_check(bool *cap_list,
                               MigrationCapabilityStatusList *params,
                               Error **errp)
{
    MigrationCapabilityStatusList *cap;
    bool old_postcopy_cap;
    MigrationIncomingState *mis = migration_incoming_get_current();

    old_postcopy_cap = cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM];

    for (cap = params; cap; cap = cap->next) {
        cap_list[cap->value->capability] = cap->value->state;
    }

#ifndef CONFIG_LIVE_BLOCK_MIGRATION
    if (cap_list[MIGRATION_CAPABILITY_BLOCK]) {
        error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
                   "block migration");
        error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
        return false;
    }
#endif

#ifndef CONFIG_REPLICATION
    if (cap_list[MIGRATION_CAPABILITY_X_COLO]) {
        error_setg(errp, "QEMU compiled without replication module"
                   " can't enable COLO");
        error_append_hint(errp, "Please enable replication before COLO.\n");
        return false;
    }
#endif

    if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
        if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) {
            /* The decompression threads asynchronously write into RAM
             * rather than use the atomic copies needed to avoid
             * userfaulting.  It should be possible to fix the decompression
             * threads for compatibility in future.
             */
            error_setg(errp, "Postcopy is not currently compatible "
                       "with compression");
            return false;
        }

        /* This check is reasonably expensive, so only when it's being
         * set the first time, also it's only the destination that needs
         * special support.
         */
        if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host(mis)) {
            /* postcopy_ram_supported_by_host will have emitted a more
             * detailed message
             */
            error_setg(errp, "Postcopy is not supported");
            return false;
        }

        if (cap_list[MIGRATION_CAPABILITY_X_IGNORE_SHARED]) {
            error_setg(errp, "Postcopy is not compatible with ignore-shared");
            return false;
        }
    }

    return true;
}

1028 1029 1030 1031
/*
 * Fill in @info with the state of the incoming (destination) side of
 * migration; leaves @info untouched when no incoming migration exists.
 */
static void fill_destination_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->socket_address_list) {
        info->has_socket_address = true;
        info->socket_address =
            QAPI_CLONE(SocketAddressList, mis->socket_address_list);
    }

    switch (mis->state) {
    case MIGRATION_STATUS_NONE:
        /* Nothing incoming ever happened; do not touch the info. */
        return;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_COMPLETED:
        info->has_status = true;
        /* Completed runs can also report postcopy statistics. */
        fill_destination_postcopy_migration_info(info);
        break;
    }
    info->status = mis->state;
}

/* QMP handler: report combined destination- and source-side status. */
MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_new0(MigrationInfo, 1);

    fill_destination_migration_info(info);
    fill_source_migration_info(info);

    return info;
}

1071 1072 1073 1074 1075
/* QMP handler: validate and then commit a set of capability changes. */
void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *entry;
    bool new_caps[MIGRATION_CAPABILITY__MAX];

    /* Capabilities may not change while a migration is in flight. */
    if (migration_is_setup_or_active(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    /* Validate against a scratch copy before committing anything. */
    memcpy(new_caps, s->enabled_capabilities, sizeof(new_caps));
    if (!migrate_caps_check(new_caps, params, errp)) {
        return;
    }

    for (entry = params; entry; entry = entry->next) {
        s->enabled_capabilities[entry->value->capability] =
            entry->value->state;
    }
}

1093 1094 1095 1096 1097
/*
 * Check whether the parameters are valid. Error will be put into errp
 * (if provided). Return true if valid, otherwise false.
 */
static bool migrate_params_check(MigrationParameters *params, Error **errp)
{
    /* compress_level: zlib level, 0..9 */
    if (params->has_compress_level &&
        (params->compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return false;
    }

    /* Thread counts are 8-bit on the wire, so 1..255. */
    if (params->has_compress_threads && (params->compress_threads < 1)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return false;
    }

    if (params->has_decompress_threads && (params->decompress_threads < 1)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return false;
    }

    /* CPU throttle percentages must stay inside 1..99. */
    if (params->has_cpu_throttle_initial &&
        (params->cpu_throttle_initial < 1 ||
         params->cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
        return false;
    }

    if (params->has_cpu_throttle_increment &&
        (params->cpu_throttle_increment < 1 ||
         params->cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
        return false;
    }

    /* Bandwidth is stored in a size_t internally. */
    if (params->has_max_bandwidth && (params->max_bandwidth > SIZE_MAX)) {
        error_setg(errp, "Parameter 'max_bandwidth' expects an integer in the"
                         " range of 0 to %zu bytes/second", SIZE_MAX);
        return false;
    }

    if (params->has_downtime_limit &&
        (params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d milliseconds",
                         MAX_MIGRATE_DOWNTIME);
        return false;
    }

    /* x_checkpoint_delay is now always positive */

    if (params->has_multifd_channels && (params->multifd_channels < 1)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "multifd_channels",
                   "is invalid, it should be in the range of 1 to 255");
        return false;
    }

    /* The XBZRLE cache must hold at least one page and be a power of 2. */
    if (params->has_xbzrle_cache_size &&
        (params->xbzrle_cache_size < qemu_target_page_size() ||
         !is_power_of_2(params->xbzrle_cache_size))) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "xbzrle_cache_size",
                   "is invalid, it should be bigger than target page size"
                   " and a power of two");
        return false;
    }

    if (params->has_max_cpu_throttle &&
        (params->max_cpu_throttle < params->cpu_throttle_initial ||
         params->max_cpu_throttle > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "max_cpu_throttle",
                   "an integer in the range of cpu_throttle_initial to 99");
        return false;
    }

    /* Announce timings are in milliseconds. */
    if (params->has_announce_initial &&
        params->announce_initial > 100000) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "announce_initial",
                   "is invalid, it must be less than 100000 ms");
        return false;
    }
    if (params->has_announce_max &&
        params->announce_max > 100000) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "announce_max",
                   "is invalid, it must be less than 100000 ms");
        return false;
    }
    if (params->has_announce_rounds &&
        params->announce_rounds > 1000) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "announce_rounds",
                   "is invalid, it must be in the range of 0 to 1000");
        return false;
    }
    if (params->has_announce_step &&
        (params->announce_step < 1 ||
        params->announce_step > 10000)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "announce_step",
                   "is invalid, it must be in the range of 1 to 10000 ms");
        return false;
    }
    return true;
}

1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226
/*
 * Build in @dest a scratch copy of the current parameters with @params
 * overlaid, so the combined result can be validated before committing.
 */
static void migrate_params_test_apply(MigrateSetParameters *params,
                                      MigrationParameters *dest)
{
    *dest = migrate_get_current()->parameters;

    /* TODO use QAPI_CLONE() instead of duplicating it inline */

    if (params->has_compress_level) {
        dest->compress_level = params->compress_level;
    }

    if (params->has_compress_threads) {
        dest->compress_threads = params->compress_threads;
    }

    if (params->has_compress_wait_thread) {
        dest->compress_wait_thread = params->compress_wait_thread;
    }

    if (params->has_decompress_threads) {
        dest->decompress_threads = params->decompress_threads;
    }

    if (params->has_cpu_throttle_initial) {
        dest->cpu_throttle_initial = params->cpu_throttle_initial;
    }

    if (params->has_cpu_throttle_increment) {
        dest->cpu_throttle_increment = params->cpu_throttle_increment;
    }

    if (params->has_tls_creds) {
        /* qmp_migrate_set_parameters has rewritten QNULL to "". */
        assert(params->tls_creds->type == QTYPE_QSTRING);
        dest->tls_creds = g_strdup(params->tls_creds->u.s);
    }

    if (params->has_tls_hostname) {
        assert(params->tls_hostname->type == QTYPE_QSTRING);
        dest->tls_hostname = g_strdup(params->tls_hostname->u.s);
    }

    if (params->has_max_bandwidth) {
        dest->max_bandwidth = params->max_bandwidth;
    }

    if (params->has_downtime_limit) {
        dest->downtime_limit = params->downtime_limit;
    }

    if (params->has_x_checkpoint_delay) {
        dest->x_checkpoint_delay = params->x_checkpoint_delay;
    }

    if (params->has_block_incremental) {
        dest->block_incremental = params->block_incremental;
    }
    if (params->has_multifd_channels) {
        dest->multifd_channels = params->multifd_channels;
    }
    if (params->has_xbzrle_cache_size) {
        dest->xbzrle_cache_size = params->xbzrle_cache_size;
    }
    if (params->has_max_postcopy_bandwidth) {
        dest->max_postcopy_bandwidth = params->max_postcopy_bandwidth;
    }
    if (params->has_max_cpu_throttle) {
        dest->max_cpu_throttle = params->max_cpu_throttle;
    }
    if (params->has_announce_initial) {
        dest->announce_initial = params->announce_initial;
    }
    if (params->has_announce_max) {
        dest->announce_max = params->announce_max;
    }
    if (params->has_announce_rounds) {
        dest->announce_rounds = params->announce_rounds;
    }
    if (params->has_announce_step) {
        dest->announce_step = params->announce_step;
    }
}

1294
/*
 * Commit @params into the global MigrationState, applying immediate side
 * effects (rate limits, cache resize, COLO notification) where needed.
 */
static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
{
    MigrationState *s = migrate_get_current();

    /* TODO use QAPI_CLONE() instead of duplicating it inline */

    if (params->has_compress_level) {
        s->parameters.compress_level = params->compress_level;
    }

    if (params->has_compress_threads) {
        s->parameters.compress_threads = params->compress_threads;
    }

    if (params->has_compress_wait_thread) {
        s->parameters.compress_wait_thread = params->compress_wait_thread;
    }

    if (params->has_decompress_threads) {
        s->parameters.decompress_threads = params->decompress_threads;
    }

    if (params->has_cpu_throttle_initial) {
        s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
    }

    if (params->has_cpu_throttle_increment) {
        s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
    }

    if (params->has_tls_creds) {
        g_free(s->parameters.tls_creds);
        assert(params->tls_creds->type == QTYPE_QSTRING);
        s->parameters.tls_creds = g_strdup(params->tls_creds->u.s);
    }

    if (params->has_tls_hostname) {
        g_free(s->parameters.tls_hostname);
        assert(params->tls_hostname->type == QTYPE_QSTRING);
        s->parameters.tls_hostname = g_strdup(params->tls_hostname->u.s);
    }

    if (params->has_tls_authz) {
        g_free(s->parameters.tls_authz);
        assert(params->tls_authz->type == QTYPE_QSTRING);
        s->parameters.tls_authz = g_strdup(params->tls_authz->u.s);
    }

    if (params->has_max_bandwidth) {
        s->parameters.max_bandwidth = params->max_bandwidth;
        /* Propagate to a precopy stream that is already running. */
        if (s->to_dst_file && !migration_in_postcopy()) {
            qemu_file_set_rate_limit(s->to_dst_file,
                                s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
        }
    }

    if (params->has_downtime_limit) {
        s->parameters.downtime_limit = params->downtime_limit;
    }

    if (params->has_x_checkpoint_delay) {
        s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
        if (migration_in_colo_state()) {
            colo_checkpoint_notify(s);
        }
    }

    if (params->has_block_incremental) {
        s->parameters.block_incremental = params->block_incremental;
    }
    if (params->has_multifd_channels) {
        s->parameters.multifd_channels = params->multifd_channels;
    }
    if (params->has_xbzrle_cache_size) {
        s->parameters.xbzrle_cache_size = params->xbzrle_cache_size;
        xbzrle_cache_resize(params->xbzrle_cache_size, errp);
    }
    if (params->has_max_postcopy_bandwidth) {
        s->parameters.max_postcopy_bandwidth = params->max_postcopy_bandwidth;
        /* Propagate to a postcopy stream that is already running. */
        if (s->to_dst_file && migration_in_postcopy()) {
            qemu_file_set_rate_limit(s->to_dst_file,
                    s->parameters.max_postcopy_bandwidth / XFER_LIMIT_RATIO);
        }
    }
    if (params->has_max_cpu_throttle) {
        s->parameters.max_cpu_throttle = params->max_cpu_throttle;
    }
    if (params->has_announce_initial) {
        s->parameters.announce_initial = params->announce_initial;
    }
    if (params->has_announce_max) {
        s->parameters.announce_max = params->announce_max;
    }
    if (params->has_announce_rounds) {
        s->parameters.announce_rounds = params->announce_rounds;
    }
    if (params->has_announce_step) {
        s->parameters.announce_step = params->announce_step;
    }
}

1395
/*
 * QMP handler: normalise, validate and apply a parameter update.
 *
 * Fix: the QNULL->"" rewrites used plain strdup(), whose NULL return on
 * OOM was never checked and whose result is later released with g_free().
 * Use g_strdup(), which matches the g_free()/g_strdup() usage on these
 * fields elsewhere in this file and aborts on OOM per GLib convention.
 */
void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp)
{
    MigrationParameters tmp;

    /* TODO Rewrite "" to null instead */
    if (params->has_tls_creds
        && params->tls_creds->type == QTYPE_QNULL) {
        qobject_unref(params->tls_creds->u.n);
        params->tls_creds->type = QTYPE_QSTRING;
        params->tls_creds->u.s = g_strdup("");
    }
    /* TODO Rewrite "" to null instead */
    if (params->has_tls_hostname
        && params->tls_hostname->type == QTYPE_QNULL) {
        qobject_unref(params->tls_hostname->u.n);
        params->tls_hostname->type = QTYPE_QSTRING;
        params->tls_hostname->u.s = g_strdup("");
    }

    migrate_params_test_apply(params, &tmp);

    if (!migrate_params_check(&tmp, errp)) {
        /* Invalid parameter */
        return;
    }

    migrate_params_apply(params, errp);
}

1424

1425 1426 1427 1428
/* QMP handler: request switchover from precopy to postcopy mode. */
void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    atomic_set(&s->start_postcopy, true);
}

1447 1448
/* shared migration helpers */

1449
void migrate_set_state(int *state, int old_state, int new_state)
1450
{
1451
    assert(new_state < MIGRATION_STATUS__MAX);
1452
    if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
1453
        trace_migrate_set_state(MigrationStatus_str(new_state));
1454
        migrate_generate_event(new_state);
1455 1456 1457
    }
}

P
Peter Xu 已提交
1458 1459 1460 1461
/* Prepend a (capability, state) entry to @list and return the new head. */
static MigrationCapabilityStatusList *migrate_cap_add(
    MigrationCapabilityStatusList *list,
    MigrationCapability index,
    bool state)
{
    MigrationCapabilityStatusList *entry;

    entry = g_new0(MigrationCapabilityStatusList, 1);
    entry->value = g_new0(MigrationCapabilityStatus, 1);
    entry->value->capability = index;
    entry->value->state = state;
    entry->next = list;

    return entry;
}

/* Toggle the "block" migration capability through the QMP setter. */
void migrate_set_block_enabled(bool value, Error **errp)
{
    MigrationCapabilityStatusList *caps;

    caps = migrate_cap_add(NULL, MIGRATION_CAPABILITY_BLOCK, value);
    qmp_migrate_set_capabilities(caps, errp);
    qapi_free_MigrationCapabilityStatusList(caps);
}

/* Set the block-incremental parameter directly (no validation needed). */
static void migrate_set_block_incremental(MigrationState *s, bool value)
{
    s->parameters.block_incremental = value;
}

/* Undo block-migration options that were forced on by -b/-i flags. */
static void block_cleanup_parameters(MigrationState *s)
{
    if (!s->must_remove_block_options) {
        return;
    }
    /* setting to false can never fail */
    migrate_set_block_enabled(false, &error_abort);
    migrate_set_block_incremental(s, false);
    s->must_remove_block_options = false;
}

1498
/*
 * Tear down the source side of a finished/failed/cancelled migration:
 * join the migration thread, close the outgoing stream, finalise the
 * state and notify listeners.  Runs with the iothread lock held.
 */
static void migrate_fd_cleanup(MigrationState *s)
{
    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    qemu_savevm_state_cleanup();

    if (s->to_dst_file) {
        QEMUFile *file;

        trace_migrate_fd_cleanup();
        /* Drop the big lock while joining so the thread can finish. */
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        multifd_save_cleanup();
        qemu_mutex_lock(&s->qemu_file_lock);
        file = s->to_dst_file;
        s->to_dst_file = NULL;
        qemu_mutex_unlock(&s->qemu_file_lock);
        /*
         * Close the file handle without the lock to make sure the
         * critical section won't block for long.
         */
        qemu_fclose(file);
    }

    assert((s->state != MIGRATION_STATUS_ACTIVE) &&
           (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    if (s->error) {
        /* It is used on info migrate.  We can't free it */
        error_report_err(error_copy(s->error));
    }
    notifier_list_notify(&migration_state_notifiers, s);
    block_cleanup_parameters(s);
}

1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560
/* Schedule cleanup in a bottom half, pinning @s until the bh has run. */
static void migrate_fd_cleanup_schedule(MigrationState *s)
{
    /*
     * Ref the state for bh, because it may be called when
     * there're already no other refs
     */
    object_ref(OBJECT(s));
    qemu_bh_schedule(s->cleanup_bh);
}

/* Bottom half body: do the cleanup, then drop the ref taken above. */
static void migrate_fd_cleanup_bh(void *opaque)
{
    MigrationState *s = opaque;

    migrate_fd_cleanup(s);
    object_unref(OBJECT(s));
}

1561 1562 1563 1564 1565 1566 1567 1568 1569
/* Record the first migration error; later errors are dropped. */
void migrate_set_error(MigrationState *s, const Error *error)
{
    qemu_mutex_lock(&s->error_mutex);
    if (!s->error) {
        s->error = error_copy(error);
    }
    qemu_mutex_unlock(&s->error_mutex);
}

1570
/*
 * Fail a migration that is still in SETUP (no stream has been opened
 * yet) and remember @error for "info migrate".
 */
void migrate_fd_error(MigrationState *s, const Error *error)
{
    trace_migrate_fd_error(error_get_pretty(error));
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    migrate_set_error(s, error);
}

1579
/*
 * Request cancellation of an in-progress migration and unwedge any
 * threads blocked on the stream or on the pre-switchover pause.
 */
static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;

    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* shutdown the rp socket, so causing the rp thread to shutdown */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

    /* CAS loop: keep trying until we observe CANCELLING or a final state. */
    do {
        old_state = s->state;
        if (!migration_is_setup_or_active(old_state)) {
            break;
        }
        /* If the migration is paused, kick it out of the pause */
        if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
            qemu_sem_post(&s->pause_sem);
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;

        /* Give the block devices back to the still-running guest. */
        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
    }
}

1624 1625 1626 1627 1628 1629 1630
/* Register a notifier invoked on migration state transitions. */
void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

/* Unregister a previously added migration state notifier. */
void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

S
Stefan Hajnoczi 已提交
1634
/* True while the migration is still in its SETUP phase. */
bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

1639
/* True once the migration reached COMPLETED. */
bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}
1643

1644 1645
/* True when the migration ended without completing. */
bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

1650
bool migration_in_postcopy(void)
1651
{
1652 1653
    MigrationState *s = migrate_get_current();

1654 1655 1656
    return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

1657 1658
/* True in postcopy once device state has already been transferred. */
bool migration_in_postcopy_after_devices(MigrationState *s)
{
    return migration_in_postcopy() && s->postcopy_after_devices;
}

1662
bool migration_is_idle(void)
1663
{
1664 1665 1666 1667 1668
    MigrationState *s = current_migration;

    if (!s) {
        return true;
    }
1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_COMPLETED:
    case MIGRATION_STATUS_FAILED:
        return true;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_COLO:
1681 1682
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
1683 1684 1685 1686 1687 1688 1689 1690
        return false;
    case MIGRATION_STATUS__MAX:
        g_assert_not_reached();
    }

    return false;
}

1691
/*
 * Reinitialise all migration state, except
 * parameters/capabilities that the user set, and
 * locks.
 */
void migrate_init(MigrationState *s)
{
    s->bytes_xfer = 0;
    s->cleanup_bh = 0;
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->pages_per_second = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->setup_time = 0;
    s->start_postcopy = false;
    s->postcopy_after_devices = false;
    s->migration_thread_running = false;
    /* s->error is shown by "info migrate"; drop any stale copy. */
    error_free(s->error);
    s->error = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    s->total_time = 0;
    s->vm_was_running = false;
    s->iteration_initial_bytes = 0;
    s->threshold_size = 0;
}
1723

A
Anthony Liguori 已提交
1724 1725
static GSList *migration_blockers;

1726
/*
 * Register @reason as a migration blocker.  Fails with -EACCES under
 * --only-migratable and with -EBUSY while a migration is in progress.
 */
int migrate_add_blocker(Error *reason, Error **errp)
{
    if (only_migratable) {
        error_propagate_prepend(errp, error_copy(reason),
                                "disallowing migration blocker "
                                "(--only_migratable) for: ");
        return -EACCES;
    }

    if (migration_is_idle()) {
        migration_blockers = g_slist_prepend(migration_blockers, reason);
        return 0;
    }

    error_propagate_prepend(errp, error_copy(reason),
                            "disallowing migration blocker "
                            "(migration in progress) for: ");
    return -EBUSY;
}

/* Drop @reason from the blocker list; the caller still owns the Error. */
void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

D
Dr. David Alan Gilbert 已提交
1751 1752 1753
/*
 * QMP command: start the deferred incoming migration from @uri.
 *
 * Only valid when QEMU was started with '-incoming defer', and only
 * once per process lifetime.
 */
void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        /*
         * Bug fix: the original fell through here, starting a second
         * incoming migration despite reporting an error, and could then
         * call error_propagate() on an errp that was already set.
         */
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Only mark the command consumed once it actually succeeded. */
    once = false;
}

1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797
/*
 * QMP command: re-establish the incoming channel of a paused postcopy
 * migration using @uri.  Valid only in POSTCOPY_PAUSED state, and only
 * one recovery attempt may be in flight at a time.
 */
void qmp_migrate_recover(const char *uri, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
        error_setg(errp, "Migrate recover can only be run "
                   "when postcopy is paused.");
        return;
    }

    /* Atomic test-and-set so concurrent recover commands cannot race. */
    if (atomic_cmpxchg(&mis->postcopy_recover_triggered,
                       false, true) == true) {
        error_setg(errp, "Migrate recovery is triggered already");
        return;
    }

    /*
     * Note that this call will never start a real migration; it will
     * only re-setup the migration stream and poke existing migration
     * to continue using that newly established channel.
     */
    qemu_start_incoming_migration(uri, errp);
}

1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826
/*
 * QMP command: forcibly shut down the migration stream on whichever
 * side (source or destination) is currently in POSTCOPY_ACTIVE, so the
 * migration can transition to the paused state.  Errors out in any
 * other state.
 */
void qmp_migrate_pause(Error **errp)
{
    MigrationState *ms = migrate_get_current();
    MigrationIncomingState *mis = migration_incoming_get_current();
    int ret;

    if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        /* Source side, during postcopy */
        /* qemu_file_lock serialises shutdown against users of to_dst_file */
        qemu_mutex_lock(&ms->qemu_file_lock);
        ret = qemu_file_shutdown(ms->to_dst_file);
        qemu_mutex_unlock(&ms->qemu_file_lock);
        if (ret) {
            error_setg(errp, "Failed to pause source migration");
        }
        return;
    }

    if (mis->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        ret = qemu_file_shutdown(mis->from_src_file);
        if (ret) {
            error_setg(errp, "Failed to pause destination migration");
        }
        return;
    }

    error_setg(errp, "migrate-pause is currently only supported "
               "during postcopy-active state");
}

1827 1828 1829 1830 1831 1832 1833
/*
 * Check whether outgoing migration is currently forbidden.
 *
 * Returns true (and sets @errp to the reason) if either the savevm core
 * reports a blocked device state or at least one blocker has been
 * registered via migrate_add_blocker(); only the first registered
 * blocker is reported.
 */
bool migration_is_blocked(Error **errp)
{
    /* Device-state blockers take precedence. */
    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (!migration_blockers) {
        return false;
    }

    error_propagate(errp, error_copy(migration_blockers->data));
    return true;
}

P
Peter Xu 已提交
1841 1842 1843
/* Returns true if continue to migrate, or false if error detected */
/*
 * Validate and prepare migration state @s before starting (or resuming)
 * an outgoing migration.
 *
 * @blk / @blk_inc: block migration / incremental block migration were
 *                  requested on the command.
 * @resume:         this is a postcopy recovery, not a fresh migration.
 *
 * On the resume path only sanity checks are done; on the fresh path the
 * state machine and counters are reinitialised via migrate_init().
 */
static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
                            bool resume, Error **errp)
{
    Error *local_err = NULL;

    if (resume) {
        /* Resuming is only legal from a paused postcopy. */
        if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
            error_setg(errp, "Cannot resume if there is no "
                       "paused migration");
            return false;
        }

        /*
         * Postcopy recovery won't work well with release-ram
         * capability since release-ram will drop the page buffer as
         * long as the page is put into the send buffer.  So if there
         * is a network failure happened, any page buffers that have
         * not yet reached the destination VM but have already been
         * sent from the source VM will be lost forever.  Let's refuse
         * the client from resuming such a postcopy migration.
         * Luckily release-ram was designed to only be used when src
         * and destination VMs are on the same host, so it should be
         * fine.
         */
        if (migrate_release_ram()) {
            error_setg(errp, "Postcopy recovery cannot work "
                       "when release-ram capability is set");
            return false;
        }

        /* This is a resume, skip init status */
        return true;
    }

    /* A migration must not already be running (or winding down). */
    if (migration_is_setup_or_active(s->state) ||
        s->state == MIGRATION_STATUS_CANCELLING ||
        s->state == MIGRATION_STATUS_COLO) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return false;
    }

    /* Cannot migrate out while we are still an incoming target. */
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return false;
    }

    if (migration_is_blocked(errp)) {
        return false;
    }

    if (blk || blk_inc) {
        /* Command-line options conflict with capability-style config. */
        if (migrate_use_block() || migrate_use_block_incremental()) {
            error_setg(errp, "Command options are incompatible with "
                       "current migration capabilities");
            return false;
        }
        migrate_set_block_enabled(true, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return false;
        }
        /* Remember to undo this temporary capability after the run. */
        s->must_remove_block_options = true;
    }

    if (blk_inc) {
        migrate_set_block_incremental(s, true);
    }

    migrate_init(s);

    return true;
}

/*
 * QMP command: start an outgoing migration to @uri.
 *
 * Supported URI schemes: tcp:, rdma: (when built in), exec:, unix:, fd:.
 * @has_detach/@detach are accepted for compatibility but unused here.
 * On a bad URI the state machine is moved SETUP -> FAILED; on a
 * transport setup error migrate_fd_error() records the failure.
 */
void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 bool has_resume, bool resume, Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    const char *p;

    if (!migrate_prepare(s, has_blk && blk, has_inc && inc,
                         has_resume && resume, errp)) {
        /* Error detected, put into errp */
        return;
    }

    /* Dispatch on the URI scheme; @p points past the "scheme:" prefix. */
    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        /* Undo any block-migration options migrate_prepare() enabled. */
        block_cleanup_parameters(s);
        return;
    }

    if (local_err) {
        migrate_fd_error(s, local_err);
        error_propagate(errp, local_err);
        return;
    }
}

L
Luiz Capitulino 已提交
1957
/* QMP command: cancel the current outgoing migration, if any. */
void qmp_migrate_cancel(Error **errp)
{
    MigrationState *s = migrate_get_current();

    migrate_fd_cancel(s);
}

1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972
/*
 * QMP command: let a migration paused in @state (e.g. pre-switchover)
 * proceed, by posting the pause semaphore.  Fails if the migration is
 * not currently in the expected state.
 */
void qmp_migrate_continue(MigrationStatus state, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (s->state == state) {
        qemu_sem_post(&s->pause_sem);
        return;
    }

    error_setg(errp,  "Migration not in expected state: %s",
               MigrationStatus_str(s->state));
}

1973 1974
/* Legacy QMP command: set XBZRLE cache size via migrate-set-parameters. */
void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    qmp_migrate_set_parameters(&(MigrateSetParameters) {
        .has_xbzrle_cache_size = true,
        .xbzrle_cache_size = value,
    }, errp);
}

/* Legacy QMP command: report the configured XBZRLE cache size. */
int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

L
Luiz Capitulino 已提交
1988
/* Legacy QMP command: set max bandwidth via migrate-set-parameters. */
void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    qmp_migrate_set_parameters(&(MigrateSetParameters) {
        .has_max_bandwidth = true,
        .max_bandwidth = value,
    }, errp);
}

1998
/*
 * Legacy QMP command: set the downtime limit (in seconds) via
 * migrate-set-parameters.  Rejects values outside
 * [0, MAX_MIGRATE_DOWNTIME_SECONDS].
 */
void qmp_migrate_set_downtime(double value, Error **errp)
{
    MigrateSetParameters p;

    if (value < 0 || value > MAX_MIGRATE_DOWNTIME_SECONDS) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d seconds",
                         MAX_MIGRATE_DOWNTIME_SECONDS);
        return;
    }

    value *= 1000; /* Convert to milliseconds */
    value = MAX(0, MIN(INT64_MAX, value));

    p = (MigrateSetParameters) {
        .has_downtime_limit = true,
        .downtime_limit = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}
2017

2018 2019 2020 2021 2022 2023 2024 2025 2026
/* Whether the "release-ram" capability is enabled. */
bool migrate_release_ram(void)
{
    MigrationState *s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
}

2027 2028 2029 2030 2031 2032
bool migrate_postcopy_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

D
Dr. David Alan Gilbert 已提交
2033
    return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
2034 2035
}

2036 2037
/* True if any postcopy-capable data (RAM or dirty bitmaps) will migrate. */
bool migrate_postcopy(void)
{
    return migrate_postcopy_ram() || migrate_dirty_bitmaps();
}

2041 2042 2043 2044 2045 2046 2047 2048 2049
/* Whether the "auto-converge" capability is enabled. */
bool migrate_auto_converge(void)
{
    MigrationState *s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

2050 2051 2052 2053 2054 2055 2056 2057 2058
/* Whether the "zero-blocks" capability is enabled. */
bool migrate_zero_blocks(void)
{
    MigrationState *s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

2059 2060 2061 2062 2063 2064 2065 2066 2067
/* Whether the "postcopy-blocktime" capability is enabled. */
bool migrate_postcopy_blocktime(void)
{
    MigrationState *s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME];
}

2068 2069
bool migrate_use_compression(void)
{
2070 2071 2072 2073 2074
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
2075 2076 2077 2078 2079 2080 2081 2082
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

2083
    return s->parameters.compress_level;
2084 2085 2086 2087 2088 2089 2090 2091
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

2092
    return s->parameters.compress_threads;
2093 2094
}

2095 2096 2097 2098 2099 2100 2101 2102 2103
/* Configured compress-wait-thread parameter. */
int migrate_compress_wait_thread(void)
{
    MigrationState *s = migrate_get_current();

    return s->parameters.compress_wait_thread;
}

2104 2105 2106 2107 2108 2109
int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

2110
    return s->parameters.decompress_threads;
2111 2112
}

2113 2114 2115 2116 2117 2118 2119 2120 2121
/* Whether the "dirty-bitmaps" capability is enabled. */
bool migrate_dirty_bitmaps(void)
{
    MigrationState *s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS];
}

2122 2123 2124 2125 2126 2127 2128 2129 2130
/* Whether the experimental "x-ignore-shared" capability is enabled. */
bool migrate_ignore_shared(void)
{
    MigrationState *s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_X_IGNORE_SHARED];
}

2131 2132 2133 2134 2135 2136 2137 2138 2139
/* Whether the "events" capability (MIGRATION QMP events) is enabled. */
bool migrate_use_events(void)
{
    MigrationState *s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}

2140 2141 2142 2143 2144 2145
bool migrate_use_multifd(void)
{
    MigrationState *s;

    s = migrate_get_current();

J
Juan Quintela 已提交
2146
    return s->enabled_capabilities[MIGRATION_CAPABILITY_MULTIFD];
2147 2148
}

2149 2150 2151 2152 2153 2154 2155 2156 2157 2158
/* Whether the "pause-before-switchover" capability is enabled. */
bool migrate_pause_before_switchover(void)
{
    MigrationState *s = migrate_get_current();

    return s->enabled_capabilities[
        MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER];
}

2159 2160 2161 2162 2163 2164
int migrate_multifd_channels(void)
{
    MigrationState *s;

    s = migrate_get_current();

J
Juan Quintela 已提交
2165
    return s->parameters.multifd_channels;
2166 2167
}

2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182
/* Whether the "xbzrle" capability is enabled. */
int migrate_use_xbzrle(void)
{
    MigrationState *s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

2183
    return s->parameters.xbzrle_cache_size;
2184
}
2185

2186 2187 2188 2189 2190 2191 2192 2193 2194
/* Configured bandwidth limit for the postcopy phase (0 = unlimited). */
static int64_t migrate_max_postcopy_bandwidth(void)
{
    MigrationState *s = migrate_get_current();

    return s->parameters.max_postcopy_bandwidth;
}

2195 2196 2197 2198 2199 2200 2201 2202 2203
/* Whether the "block" (block-device migration) capability is enabled. */
bool migrate_use_block(void)
{
    MigrationState *s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_BLOCK];
}

2204 2205 2206 2207 2208 2209 2210 2211 2212
/* Whether the "return-path" capability is enabled. */
bool migrate_use_return_path(void)
{
    MigrationState *s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_RETURN_PATH];
}

2213 2214 2215 2216 2217 2218 2219 2220 2221
/* Whether incremental block migration is configured. */
bool migrate_use_block_incremental(void)
{
    MigrationState *s = migrate_get_current();

    return s->parameters.block_incremental;
}

2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238
/* migration thread support */
/*
 * Something bad happened to the RP stream, mark an error
 * The caller shall print or trace something to indicate why
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}

/*
 * Expected payload length and printable name for each return-path
 * message type, indexed by MIG_RP_MSG_*.  len == -1 means the payload
 * is variable-sized and validated by the handler itself.
 */
static struct rp_cmd_args {
    ssize_t     len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_RECV_BITMAP]    = { .len = -1, .name = "RECV_BITMAP" },
    [MIG_RP_MSG_RESUME_ACK]     = { .len =  4, .name = "RESUME_ACK" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};

2246 2247 2248 2249 2250 2251 2252 2253
/*
 * Process a request for pages received on the return path,
 * We're allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
                                       ram_addr_t start, size_t len)
{
    const long host_ps = getpagesize();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes between the two
     * sides, reject any request that is not aligned to whole host pages.
     */
    if ((start & (host_ps - 1)) || (len & (host_ps - 1))) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286
/* Return true to retry, false to quit */
static bool postcopy_pause_return_path_thread(MigrationState *s)
{
    trace_postcopy_pause_return_path();

    /* Block until the recovery path posts the semaphore to kick us. */
    qemu_sem_wait(&s->postcopy_pause_rp_sem);

    trace_postcopy_pause_return_path_continued();

    /* Currently we always ask the caller to retry. */
    return true;
}

2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299
/*
 * Handle a RECV_BITMAP return-path message: look up the RAMBlock named
 * @block_name and reload its dirty bitmap from the destination.
 * Returns 0 on success, -EINVAL for an unknown block name, or the
 * result of ram_dirty_bitmap_reload().
 */
static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name)
{
    RAMBlock *block;

    block = qemu_ram_block_by_name(block_name);
    if (!block) {
        error_report("%s: invalid block name '%s'", __func__, block_name);
        return -EINVAL;
    }

    /* Fetch the received bitmap and refresh the dirty bitmap */
    return ram_dirty_bitmap_reload(s, block);
}

2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313
/*
 * Handle a RESUME_ACK return-path message during postcopy recovery.
 * A valid ack moves the state machine POSTCOPY_RECOVER ->
 * POSTCOPY_ACTIVE and wakes the send thread.  Returns 0 on success,
 * -1 on an unexpected ack value.
 */
static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value)
{
    trace_source_return_path_thread_resume_ack(value);

    if (value != MIGRATION_RESUME_ACK_VALUE) {
        error_report("%s: illegal resume_ack value %"PRIu32,
                     __func__, value);
        return -1;
    }

    /* Now both sides are active. */
    migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    /* Notify send thread that time to continue send pages */
    qemu_sem_post(&s->rp_state.rp_sem);

    return 0;
}

2320 2321 2322 2323 2324 2325 2326 2327 2328
/*
 * Handles messages sent on the return path towards the source VM
 *
 * Runs as a dedicated thread for the whole life of the return path.
 * Each iteration reads one framed message (16-bit type + 16-bit length
 * + payload) and dispatches on the type.  On -EIO it may pause and,
 * once postcopy recovery re-establishes the channel, jump back to
 * 'retry' with the new QEMUFile.
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    uint8_t buf[512];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t  len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    rcu_register_thread();

retry:
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (qemu_file_get_error(rp)) {
            mark_source_rp_bad(ms);
            goto out;
        }

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                    header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* Validate length against the per-type table and our buffer. */
        if ((rp_cmd_args[header_type].len != -1 &&
            header_len != rp_cmd_args[header_type].len) ||
            header_len > sizeof(buf)) {
            error_report("RP: Received '%s' message (0x%04x) with"
                    "incorrect length %d expecting %zu",
                    rp_cmd_args[header_type].name, header_type, header_len,
                    (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = ldl_be_p(buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = ldl_be_p(buf);
            trace_source_return_path_thread_pong(tmp32);
            break;

        case MIG_RP_MSG_REQ_PAGES:
            /* Payload: 8-byte start address + 4-byte length. */
            start = ldq_be_p(buf);
            len = ldl_be_p(buf + 8);
            migrate_handle_rp_req_pages(ms, NULL, start, len);
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
            expected_len = 12 + 1; /* header + termination */

            if (header_len >= expected_len) {
                start = ldq_be_p(buf);
                len = ldl_be_p(buf + 8);
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_report("RP: Req_Page_id with length %d expecting %zd",
                        header_len, expected_len);
                mark_source_rp_bad(ms);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
            break;

        case MIG_RP_MSG_RECV_BITMAP:
            if (header_len < 1) {
                error_report("%s: missing block name", __func__);
                mark_source_rp_bad(ms);
                goto out;
            }
            /* Format: len (1B) + idstr (<255B). This ends the idstr. */
            buf[buf[0] + 1] = '\0';
            if (migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1))) {
                mark_source_rp_bad(ms);
                goto out;
            }
            break;

        case MIG_RP_MSG_RESUME_ACK:
            tmp32 = ldl_be_p(buf);
            if (migrate_handle_rp_resume_ack(ms, tmp32)) {
                mark_source_rp_bad(ms);
                goto out;
            }
            break;

        default:
            break;
        }
    }

out:
    res = qemu_file_get_error(rp);
    if (res) {
        if (res == -EIO) {
            /*
             * Maybe there is something we can do: it looks like a
             * network down issue, and we pause for a recovery.
             */
            if (postcopy_pause_return_path_thread(ms)) {
                /* Reload rp, reset the rest */
                if (rp != ms->rp_state.from_dst_file) {
                    qemu_fclose(rp);
                    rp = ms->rp_state.from_dst_file;
                }
                ms->rp_state.error = false;
                goto retry;
            }
        }

        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
    ms->rp_state.from_dst_file = NULL;
    qemu_fclose(rp);
    rcu_unregister_thread();
    return NULL;
}

P
Peter Xu 已提交
2483 2484
/*
 * Open the return path (destination -> source) channel on top of the
 * outgoing migration stream and optionally spawn the thread that
 * services it.  Returns 0 on success, -1 if the return path could not
 * be obtained.
 */
static int open_return_path_on_source(MigrationState *ms,
                                      bool create_thread)
{
    QEMUFile *rp = qemu_file_get_return_path(ms->to_dst_file);

    if (!rp) {
        return -1;
    }
    ms->rp_state.from_dst_file = rp;

    trace_open_return_path_on_source();

    if (create_thread) {
        qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                           source_return_path_thread, ms,
                           QEMU_THREAD_JOINABLE);
        trace_open_return_path_on_source_continue();
    }

    return 0;
}

/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    }
    trace_await_return_path_close_on_source_joining();
    /* Block until the return-path thread has finished. */
    qemu_thread_join(&ms->rp_state.rp_thread);
    trace_await_return_path_close_on_source_close();
    return ms->rp_state.error;
}

2529 2530 2531 2532
/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 *
 * Runs with the migration thread; takes the iothread lock for the
 * whole switchover.  Stops the VM, flushes the remaining precopy
 * device state wrapped in a single "package" (so the destination can
 * read it all before replaying it), then lets the destination start
 * running while pages are served on demand.
 */
static int postcopy_start(MigrationState *ms)
{
    int ret;
    QIOChannelBuffer *bioc;
    QEMUFile *fb;
    int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t bandwidth = migrate_max_postcopy_bandwidth();
    bool restart_block = false;
    int cur_state = MIGRATION_STATUS_ACTIVE;
    if (!migrate_pause_before_switchover()) {
        migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_POSTCOPY_ACTIVE);
    }

    trace_postcopy_start();
    qemu_mutex_lock_iothread();
    trace_postcopy_start_set_run();

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
    global_state_store();
    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
    if (ret < 0) {
        goto fail;
    }

    /* Honour pause-before-switchover if the capability is set. */
    ret = migration_maybe_pause(ms, &cur_state,
                                MIGRATION_STATUS_POSTCOPY_ACTIVE);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_inactivate_all();
    if (ret < 0) {
        goto fail;
    }
    /* From here on, a failure must reactivate block devices. */
    restart_block = true;

    /*
     * Cause any non-postcopiable, but iterative devices to
     * send out their final data.
     */
    qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);

    /*
     * in Finish migrate and with the io-lock held everything should
     * be quiet, but we've potentially still got dirty pages and we
     * need to tell the destination to throw any pages it's already received
     * that are dirty
     */
    if (migrate_postcopy_ram()) {
        if (ram_postcopy_send_discard_bitmap(ms)) {
            error_report("postcopy send discard bitmap failed");
            goto fail;
        }
    }

    /*
     * send rest of state - note things that are doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here
     */
    /* 0 max-postcopy-bandwidth means unlimited */
    if (!bandwidth) {
        qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
    } else {
        qemu_file_set_rate_limit(ms->to_dst_file, bandwidth / XFER_LIMIT_RATIO);
    }
    if (migrate_postcopy_ram()) {
        /* Ping just for debugging, helps line traces up */
        qemu_savevm_send_ping(ms->to_dst_file, 2);
    }

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it.  Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each devices load-state code (especially the open
     * coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use a qemu_buf to hold the whole of the device state.
     */
    bioc = qio_channel_buffer_new(4096);
    qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
    fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc));
    object_unref(OBJECT(bioc));

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb, false, false);
    if (migrate_postcopy_ram()) {
        qemu_savevm_send_ping(fb, 3);
    }

    qemu_savevm_send_postcopy_run(fb);

    /* <><> end of stuff going into the package */

    /* Last point of recovery; as soon as we send the package the destination
     * can open devices and potentially start running.
     * Lets just check again we've not got any errors.
     */
    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored (pre package)");
        goto fail_closefb;
    }

    restart_block = false;

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
        goto fail_closefb;
    }
    qemu_fclose(fb);

    /* Send a notify to give a chance for anything that needs to happen
     * at the transition to postcopy and after the device state; in particular
     * spice needs to trigger a transition now
     */
    ms->postcopy_after_devices = true;
    notifier_list_notify(&migration_state_notifiers, ms);

    ms->downtime =  qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;

    qemu_mutex_unlock_iothread();

    if (migrate_postcopy_ram()) {
        /*
         * Although this ping is just for debug, it could potentially be
         * used for getting a better measurement of downtime at the source.
         */
        qemu_savevm_send_ping(ms->to_dst_file, 4);
    }

    if (migrate_release_ram()) {
        ram_postcopy_migrated_memory_release(ms);
    }

    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored");
        migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                              MIGRATION_STATUS_FAILED);
    }

    return ret;

fail_closefb:
    qemu_fclose(fb);
fail:
    migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                          MIGRATION_STATUS_FAILED);
    if (restart_block) {
        /* A failure happened early enough that we know the destination hasn't
         * accessed block devices, so we're safe to recover.
         */
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
    qemu_mutex_unlock_iothread();
    return -1;
}

2706 2707 2708 2709 2710
/**
 * migration_maybe_pause: Pause if required to by
 * migrate_pause_before_switchover called with the iothread locked
 * Returns: 0 on success
 *
 * Moves the state machine to PRE_SWITCHOVER and blocks on pause_sem
 * until qmp_migrate_continue() posts it, then transitions to
 * @new_state.  *current_active_state is updated to @new_state so the
 * caller's later transitions use the right source state.
 */
static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state)
{
    if (!migrate_pause_before_switchover()) {
        return 0;
    }

    /* Since leaving this state is not atomic with posting the semaphore
     * it's possible that someone could have issued multiple migrate_continue
     * and the semaphore is incorrectly positive at this point;
     * the docs say it's undefined to reinit a semaphore that's already
     * init'd, so use timedwait to eat up any existing posts.
     */
    while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
        /* This block intentionally left blank */
    }

    /* Drop the iothread lock while blocked so the monitor stays usable. */
    qemu_mutex_unlock_iothread();
    migrate_set_state(&s->state, *current_active_state,
                      MIGRATION_STATUS_PRE_SWITCHOVER);
    qemu_sem_wait(&s->pause_sem);
    migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
                      new_state);
    *current_active_state = new_state;
    qemu_mutex_lock_iothread();

    /* The state may have been changed (e.g. cancelled) while we waited. */
    return s->state == new_state ? 0 : -EINVAL;
}

2741 2742 2743 2744 2745 2746
/**
 * migration_completion: Used by migration_thread when there's not much left.
 *   The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 */
2747
static void migration_completion(MigrationState *s)
2748 2749
{
    int ret;
2750
    int current_active_state = s->state;
2751

2752 2753
    if (s->state == MIGRATION_STATUS_ACTIVE) {
        qemu_mutex_lock_iothread();
P
Peter Xu 已提交
2754
        s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2755
        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
2756
        s->vm_was_running = runstate_is_running();
2757 2758 2759
        ret = global_state_store();

        if (!ret) {
2760
            bool inactivate = !migrate_colo_enabled();
2761
            ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
2762
            if (ret >= 0) {
2763 2764
                ret = migration_maybe_pause(s, &current_active_state,
                                            MIGRATION_STATUS_DEVICE);
2765
            }
2766 2767
            if (ret >= 0) {
                qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
2768 2769
                ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
                                                         inactivate);
2770
            }
2771 2772
            if (inactivate && ret >= 0) {
                s->block_inactive = true;
2773 2774 2775
            }
        }
        qemu_mutex_unlock_iothread();
2776

2777 2778
        if (ret < 0) {
            goto fail;
2779
        }
2780 2781 2782
    } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        trace_migration_completion_postcopy_end();

2783
        qemu_savevm_state_complete_postcopy(s->to_dst_file);
2784
        trace_migration_completion_postcopy_end_after_complete();
2785 2786
    }

2787 2788 2789 2790 2791 2792
    /*
     * If rp was opened we must clean up the thread before
     * cleaning everything else up (since if there are no failures
     * it will wait for the destination to send it's status in
     * a SHUT command).
     */
P
Peter Xu 已提交
2793
    if (s->rp_state.from_dst_file) {
2794
        int rp_error;
P
Peter Xu 已提交
2795
        trace_migration_return_path_end_before();
2796
        rp_error = await_return_path_close_on_source(s);
P
Peter Xu 已提交
2797
        trace_migration_return_path_end_after(rp_error);
2798
        if (rp_error) {
2799
            goto fail_invalidate;
2800
        }
2801 2802
    }

2803
    if (qemu_file_get_error(s->to_dst_file)) {
2804
        trace_migration_completion_file_err();
2805
        goto fail_invalidate;
2806 2807
    }

2808 2809 2810 2811 2812
    if (!migrate_colo_enabled()) {
        migrate_set_state(&s->state, current_active_state,
                          MIGRATION_STATUS_COMPLETED);
    }

2813 2814
    return;

2815 2816 2817 2818
fail_invalidate:
    /* If not doing postcopy, vm_start() will be called: let's regain
     * control on images.
     */
2819 2820
    if (s->state == MIGRATION_STATUS_ACTIVE ||
        s->state == MIGRATION_STATUS_DEVICE) {
2821 2822
        Error *local_err = NULL;

2823
        qemu_mutex_lock_iothread();
2824 2825 2826
        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
2827 2828
        } else {
            s->block_inactive = false;
2829
        }
2830
        qemu_mutex_unlock_iothread();
2831 2832
    }

2833
fail:
2834 2835
    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_FAILED);
2836 2837
}

2838 2839 2840 2841 2842 2843
bool migrate_colo_enabled(void)
{
    MigrationState *s = migrate_get_current();
    return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO];
}

/* Result of error handling inside the migration thread */
typedef enum MigThrError {
    MIG_THR_ERR_NONE = 0,       /* no error detected */
    MIG_THR_ERR_RECOVERED = 1,  /* detected error, but resumed successfully */
    MIG_THR_ERR_FATAL = 2,      /* detected fatal error, need to exit */
} MigThrError;

2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867
static int postcopy_resume_handshake(MigrationState *s)
{
    qemu_savevm_send_postcopy_resume(s->to_dst_file);

    while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
        qemu_sem_wait(&s->rp_state.rp_sem);
    }

    if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        return 0;
    }

    return -1;
}

2868 2869 2870
/* Return zero if success, or <0 for error */
static int postcopy_do_resume(MigrationState *s)
{
2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884
    int ret;

    /*
     * Call all the resume_prepare() hooks, so that modules can be
     * ready for the migration resume.
     */
    ret = qemu_savevm_state_resume_prepare(s);
    if (ret) {
        error_report("%s: resume_prepare() failure detected: %d",
                     __func__, ret);
        return ret;
    }

    /*
2885 2886
     * Last handshake with destination on the resume (destination will
     * switch to postcopy-active afterwards)
2887
     */
2888 2889 2890 2891 2892
    ret = postcopy_resume_handshake(s);
    if (ret) {
        error_report("%s: handshake failed: %d", __func__, ret);
        return ret;
    }
2893

2894 2895 2896
    return 0;
}

2897 2898 2899 2900 2901 2902 2903 2904 2905
/*
 * We don't return until we are in a safe state to continue current
 * postcopy migration.  Returns MIG_THR_ERR_RECOVERED if recovered, or
 * MIG_THR_ERR_FATAL if unrecovery failure happened.
 */
static MigThrError postcopy_pause(MigrationState *s)
{
    assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);

2906
    while (true) {
2907 2908
        QEMUFile *file;

2909 2910
        migrate_set_state(&s->state, s->state,
                          MIGRATION_STATUS_POSTCOPY_PAUSED);
2911

2912 2913
        /* Current channel is possibly broken. Release it. */
        assert(s->to_dst_file);
2914 2915
        qemu_mutex_lock(&s->qemu_file_lock);
        file = s->to_dst_file;
2916
        s->to_dst_file = NULL;
2917 2918 2919 2920
        qemu_mutex_unlock(&s->qemu_file_lock);

        qemu_file_shutdown(file);
        qemu_fclose(file);
2921

2922 2923
        error_report("Detected IO failure for postcopy. "
                     "Migration paused.");
2924

2925 2926 2927 2928 2929 2930 2931 2932 2933 2934
        /*
         * We wait until things fixed up. Then someone will setup the
         * status back for us.
         */
        while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
            qemu_sem_wait(&s->postcopy_pause_sem);
        }

        if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
            /* Woken up by a recover procedure. Give it a shot */
2935

2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959
            /*
             * Firstly, let's wake up the return path now, with a new
             * return path channel.
             */
            qemu_sem_post(&s->postcopy_pause_rp_sem);

            /* Do the resume logic */
            if (postcopy_do_resume(s) == 0) {
                /* Let's continue! */
                trace_postcopy_pause_continued();
                return MIG_THR_ERR_RECOVERED;
            } else {
                /*
                 * Something wrong happened during the recovery, let's
                 * pause again. Pause is always better than throwing
                 * data away.
                 */
                continue;
            }
        } else {
            /* This is not right... Time to quit. */
            return MIG_THR_ERR_FATAL;
        }
    }
2960 2961 2962 2963 2964
}

/*
 * Inspect the outgoing stream for errors and decide how the migration
 * thread should proceed.
 *
 * Returns MIG_THR_ERR_NONE when the stream is healthy, may block inside
 * postcopy_pause() for a recoverable postcopy I/O error, and returns
 * MIG_THR_ERR_FATAL when the migration must stop.
 */
static MigThrError migration_detect_error(MigrationState *s)
{
    int ret;
    int state = s->state;

    if (state == MIGRATION_STATUS_CANCELLING ||
        state == MIGRATION_STATUS_CANCELLED) {
        /* End the migration, but don't set the state to failed */
        return MIG_THR_ERR_FATAL;
    }

    /* Try to detect any file errors */
    ret = qemu_file_get_error(s->to_dst_file);

    if (!ret) {
        /* Everything is fine */
        return MIG_THR_ERR_NONE;
    }

    if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret == -EIO) {
        /*
         * For postcopy, we allow the network to be down for a
         * while. After that, it can be continued by a
         * recovery phase.
         */
        return postcopy_pause(s);
    } else {
        /*
         * For precopy (or postcopy with error outside IO), we fail
         * with no time.
         */
        migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED);
        trace_migration_thread_file_err();

        /* Time to stop the migration, now. */
        return MIG_THR_ERR_FATAL;
    }
}

3001 3002 3003
/* How many bytes have we transferred since the beggining of the migration */
static uint64_t migration_total_bytes(MigrationState *s)
{
3004
    return qemu_ftell(s->to_dst_file) + ram_counters.multifd_bytes;
3005 3006
}

3007 3008
static void migration_calculate_complete(MigrationState *s)
{
3009
    uint64_t bytes = migration_total_bytes(s);
3010
    int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3011
    int64_t transfer_time;
3012 3013 3014 3015 3016 3017 3018 3019 3020 3021

    s->total_time = end_time - s->start_time;
    if (!s->downtime) {
        /*
         * It's still not set, so we are precopy migration.  For
         * postcopy, downtime is calculated during postcopy_start().
         */
        s->downtime = end_time - s->downtime_start;
    }

3022 3023 3024
    transfer_time = s->total_time - s->setup_time;
    if (transfer_time) {
        s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
3025 3026 3027
    }
}

3028 3029 3030
static void migration_update_counters(MigrationState *s,
                                      int64_t current_time)
{
3031
    uint64_t transferred, transferred_pages, time_spent;
3032
    uint64_t current_bytes; /* bytes transferred since the beginning */
3033 3034 3035 3036 3037 3038
    double bandwidth;

    if (current_time < s->iteration_start_time + BUFFER_DELAY) {
        return;
    }

3039 3040
    current_bytes = migration_total_bytes(s);
    transferred = current_bytes - s->iteration_initial_bytes;
3041 3042
    time_spent = current_time - s->iteration_start_time;
    bandwidth = (double)transferred / time_spent;
3043
    s->threshold_size = bandwidth * s->parameters.downtime_limit;
3044 3045 3046 3047

    s->mbps = (((double) transferred * 8.0) /
               ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;

3048 3049 3050 3051 3052
    transferred_pages = ram_get_total_transferred_pages() -
                            s->iteration_initial_pages;
    s->pages_per_second = (double) transferred_pages /
                             (((double) time_spent / 1000.0));

3053 3054 3055 3056 3057
    /*
     * if we haven't sent anything, we don't want to
     * recalculate. 10000 is a small enough number for our purposes
     */
    if (ram_counters.dirty_pages_rate && transferred > 10000) {
3058
        s->expected_downtime = ram_counters.remaining / bandwidth;
3059 3060 3061 3062 3063
    }

    qemu_file_reset_rate_limit(s->to_dst_file);

    s->iteration_start_time = current_time;
3064
    s->iteration_initial_bytes = current_bytes;
3065
    s->iteration_initial_pages = ram_get_total_transferred_pages();
3066 3067

    trace_migrate_transferred(transferred, time_spent,
3068
                              bandwidth, s->threshold_size);
3069 3070
}

3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083
/* Migration thread iteration status */
typedef enum {
    MIG_ITERATE_RESUME,         /* Resume current iteration */
    MIG_ITERATE_SKIP,           /* Skip current iteration */
    MIG_ITERATE_BREAK,          /* Break the loop */
} MigIterateState;

/*
 * Return true if continue to the next iteration directly, false
 * otherwise.
 */
static MigIterateState migration_iteration_run(MigrationState *s)
{
3084
    uint64_t pending_size, pend_pre, pend_compat, pend_post;
3085 3086
    bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;

3087 3088 3089
    qemu_savevm_state_pending(s->to_dst_file, s->threshold_size, &pend_pre,
                              &pend_compat, &pend_post);
    pending_size = pend_pre + pend_compat + pend_post;
3090 3091

    trace_migrate_pending(pending_size, s->threshold_size,
3092
                          pend_pre, pend_compat, pend_post);
3093 3094 3095 3096

    if (pending_size && pending_size >= s->threshold_size) {
        /* Still a significant amount to transfer */
        if (migrate_postcopy() && !in_postcopy &&
3097
            pend_pre <= s->threshold_size &&
3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115
            atomic_read(&s->start_postcopy)) {
            if (postcopy_start(s)) {
                error_report("%s: postcopy failed to start", __func__);
            }
            return MIG_ITERATE_SKIP;
        }
        /* Just another iteration step */
        qemu_savevm_state_iterate(s->to_dst_file,
            s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
    } else {
        trace_migration_thread_low_pending(pending_size);
        migration_completion(s);
        return MIG_ITERATE_BREAK;
    }

    return MIG_ITERATE_RESUME;
}

3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145
static void migration_iteration_finish(MigrationState *s)
{
    /* If we enabled cpu throttling for auto-converge, turn it off. */
    cpu_throttle_stop();

    qemu_mutex_lock_iothread();
    switch (s->state) {
    case MIGRATION_STATUS_COMPLETED:
        migration_calculate_complete(s);
        runstate_set(RUN_STATE_POSTMIGRATE);
        break;

    case MIGRATION_STATUS_ACTIVE:
        /*
         * We should really assert here, but since it's during
         * migration, let's try to reduce the usage of assertions.
         */
        if (!migrate_colo_enabled()) {
            error_report("%s: critical error: calling COLO code without "
                         "COLO enabled", __func__);
        }
        migrate_start_colo_process(s);
        /*
         * Fixme: we will run VM in COLO no matter its old running state.
         * After exited COLO, we will keep running.
         */
        s->vm_was_running = true;
        /* Fallthrough */
    case MIGRATION_STATUS_FAILED:
    case MIGRATION_STATUS_CANCELLED:
3146
    case MIGRATION_STATUS_CANCELLING:
3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160
        if (s->vm_was_running) {
            vm_start();
        } else {
            if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
                runstate_set(RUN_STATE_POSTMIGRATE);
            }
        }
        break;

    default:
        /* Should not reach here, but if so, forgive the VM. */
        error_report("%s: Unknown ending state %d", __func__, s->state);
        break;
    }
3161
    migrate_fd_cleanup_schedule(s);
3162 3163 3164
    qemu_mutex_unlock_iothread();
}

3165 3166 3167 3168 3169 3170 3171 3172 3173 3174
void migration_make_urgent_request(void)
{
    qemu_sem_post(&migrate_get_current()->rate_limit_sem);
}

void migration_consume_urgent_request(void)
{
    qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
}

3175 3176 3177 3178
/*
 * Master migration thread on the source VM.
 * It drives the migration and pumps the data down the outgoing channel.
 */
J
Juan Quintela 已提交
3179
static void *migration_thread(void *opaque)
3180
{
3181
    MigrationState *s = opaque;
3182
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
3183
    MigThrError thr_error;
3184
    bool urgent = false;
3185

3186 3187
    rcu_register_thread();

3188
    object_ref(OBJECT(s));
3189 3190
    s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

3191
    qemu_savevm_state_header(s->to_dst_file);
3192

3193 3194 3195 3196 3197
    /*
     * If we opened the return path, we need to make sure dst has it
     * opened as well.
     */
    if (s->rp_state.from_dst_file) {
3198
        /* Now tell the dest that it should open its end so it can reply */
3199
        qemu_savevm_send_open_return_path(s->to_dst_file);
3200 3201

        /* And do a ping that will make stuff easier to debug */
3202
        qemu_savevm_send_ping(s->to_dst_file, 1);
P
Peter Xu 已提交
3203
    }
3204

3205
    if (migrate_postcopy()) {
3206 3207 3208 3209 3210
        /*
         * Tell the destination that we *might* want to do postcopy later;
         * if the other end can't do postcopy it should fail now, nice and
         * early.
         */
3211
        qemu_savevm_send_postcopy_advise(s->to_dst_file);
3212 3213
    }

3214 3215 3216 3217 3218
    if (migrate_colo_enabled()) {
        /* Notify migration destination that we enable COLO */
        qemu_savevm_send_colo_enable(s->to_dst_file);
    }

3219
    qemu_savevm_state_setup(s->to_dst_file);
3220

3221
    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
3222 3223
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_ACTIVE);
3224

3225 3226 3227 3228
    trace_migration_thread_setup_complete();

    while (s->state == MIGRATION_STATUS_ACTIVE ||
           s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
3229
        int64_t current_time;
3230

3231
        if (urgent || !qemu_file_rate_limit(s->to_dst_file)) {
3232 3233 3234 3235
            MigIterateState iter_state = migration_iteration_run(s);
            if (iter_state == MIG_ITERATE_SKIP) {
                continue;
            } else if (iter_state == MIG_ITERATE_BREAK) {
3236
                break;
3237 3238
            }
        }
3239

3240 3241 3242 3243 3244 3245 3246
        /*
         * Try to detect any kind of failures, and see whether we
         * should stop the migration now.
         */
        thr_error = migration_detect_error(s);
        if (thr_error == MIG_THR_ERR_FATAL) {
            /* Stop migration */
3247
            break;
3248 3249 3250 3251 3252 3253 3254 3255
        } else if (thr_error == MIG_THR_ERR_RECOVERED) {
            /*
             * Just recovered from a e.g. network failure, reset all
             * the local variables. This is important to avoid
             * breaking transferred_bytes and bandwidth calculation
             */
            s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
            s->iteration_initial_bytes = 0;
3256
        }
3257

3258
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3259

3260 3261
        migration_update_counters(s, current_time);

3262
        urgent = false;
3263
        if (qemu_file_rate_limit(s->to_dst_file)) {
3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279
            /* Wait for a delay to do rate limiting OR
             * something urgent to post the semaphore.
             */
            int ms = s->iteration_start_time + BUFFER_DELAY - current_time;
            trace_migration_thread_ratelimit_pre(ms);
            if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
                /* We were worken by one or more urgent things but
                 * the timedwait will have consumed one of them.
                 * The service routine for the urgent wake will dec
                 * the semaphore itself for each item it consumes,
                 * so add this one we just eat back.
                 */
                qemu_sem_post(&s->rate_limit_sem);
                urgent = true;
            }
            trace_migration_thread_ratelimit_post(urgent);
3280
        }
3281 3282
    }

3283
    trace_migration_thread_after_loop();
3284
    migration_iteration_finish(s);
3285
    object_unref(OBJECT(s));
3286
    rcu_unregister_thread();
3287 3288 3289
    return NULL;
}

3290
void migrate_fd_connect(MigrationState *s, Error *error_in)
3291
{
P
Peter Xu 已提交
3292 3293 3294
    int64_t rate_limit;
    bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;

3295
    s->expected_downtime = s->parameters.downtime_limit;
3296
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
3297 3298 3299 3300 3301
    if (error_in) {
        migrate_fd_error(s, error_in);
        migrate_fd_cleanup(s);
        return;
    }
3302

P
Peter Xu 已提交
3303 3304 3305 3306 3307 3308
    if (resume) {
        /* This is a resumed migration */
        rate_limit = INT64_MAX;
    } else {
        /* This is a fresh new migration */
        rate_limit = s->parameters.max_bandwidth / XFER_LIMIT_RATIO;
3309

P
Peter Xu 已提交
3310 3311 3312 3313 3314 3315
        /* Notify before starting migration thread */
        notifier_list_notify(&migration_state_notifiers, s);
    }

    qemu_file_set_rate_limit(s->to_dst_file, rate_limit);
    qemu_file_set_blocking(s->to_dst_file, true);
3316

3317
    /*
3318 3319 3320
     * Open the return path. For postcopy, it is used exclusively. For
     * precopy, only if user specified "return-path" capability would
     * QEMU uses the return path.
3321
     */
3322
    if (migrate_postcopy_ram() || migrate_use_return_path()) {
P
Peter Xu 已提交
3323
        if (open_return_path_on_source(s, !resume)) {
3324
            error_report("Unable to open return-path for postcopy");
P
Peter Xu 已提交
3325
            migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
3326 3327 3328 3329 3330
            migrate_fd_cleanup(s);
            return;
        }
    }

P
Peter Xu 已提交
3331
    if (resume) {
3332 3333 3334 3335
        /* Wakeup the main migration thread to do the recovery */
        migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);
        qemu_sem_post(&s->postcopy_pause_sem);
P
Peter Xu 已提交
3336 3337 3338
        return;
    }

3339 3340 3341 3342 3343 3344
    if (multifd_save_setup() != 0) {
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        migrate_fd_cleanup(s);
        return;
    }
3345
    qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
3346
                       QEMU_THREAD_JOINABLE);
3347
    s->migration_thread_running = true;
3348
}
3349

P
Peter Xu 已提交
3350 3351 3352 3353
void migration_global_dump(Monitor *mon)
{
    MigrationState *ms = migrate_get_current();

3354 3355 3356 3357
    monitor_printf(mon, "globals:\n");
    monitor_printf(mon, "store-global-state: %s\n",
                   ms->store_global_state ? "on" : "off");
    monitor_printf(mon, "only-migratable: %s\n",
3358
                   only_migratable ? "on" : "off");
3359 3360 3361 3362
    monitor_printf(mon, "send-configuration: %s\n",
                   ms->send_configuration ? "on" : "off");
    monitor_printf(mon, "send-section-footer: %s\n",
                   ms->send_section_footer ? "on" : "off");
3363 3364
    monitor_printf(mon, "decompress-error-check: %s\n",
                   ms->decompress_error_check ? "on" : "off");
P
Peter Xu 已提交
3365 3366
}

3367 3368 3369
#define DEFINE_PROP_MIG_CAP(name, x)             \
    DEFINE_PROP_BOOL(name, MigrationState, enabled_capabilities[x], false)

3370 3371 3372
static Property migration_properties[] = {
    DEFINE_PROP_BOOL("store-global-state", MigrationState,
                     store_global_state, true),
3373 3374
    DEFINE_PROP_BOOL("send-configuration", MigrationState,
                     send_configuration, true),
P
Peter Xu 已提交
3375 3376
    DEFINE_PROP_BOOL("send-section-footer", MigrationState,
                     send_section_footer, true),
3377 3378
    DEFINE_PROP_BOOL("decompress-error-check", MigrationState,
                      decompress_error_check, true),
P
Peter Xu 已提交
3379 3380

    /* Migration parameters */
3381
    DEFINE_PROP_UINT8("x-compress-level", MigrationState,
P
Peter Xu 已提交
3382 3383
                      parameters.compress_level,
                      DEFAULT_MIGRATE_COMPRESS_LEVEL),
3384
    DEFINE_PROP_UINT8("x-compress-threads", MigrationState,
P
Peter Xu 已提交
3385 3386
                      parameters.compress_threads,
                      DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT),
3387 3388
    DEFINE_PROP_BOOL("x-compress-wait-thread", MigrationState,
                      parameters.compress_wait_thread, true),
3389
    DEFINE_PROP_UINT8("x-decompress-threads", MigrationState,
P
Peter Xu 已提交
3390 3391
                      parameters.decompress_threads,
                      DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
3392
    DEFINE_PROP_UINT8("x-cpu-throttle-initial", MigrationState,
P
Peter Xu 已提交
3393 3394
                      parameters.cpu_throttle_initial,
                      DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL),
3395
    DEFINE_PROP_UINT8("x-cpu-throttle-increment", MigrationState,
P
Peter Xu 已提交
3396 3397
                      parameters.cpu_throttle_increment,
                      DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT),
3398
    DEFINE_PROP_SIZE("x-max-bandwidth", MigrationState,
P
Peter Xu 已提交
3399
                      parameters.max_bandwidth, MAX_THROTTLE),
3400
    DEFINE_PROP_UINT64("x-downtime-limit", MigrationState,
P
Peter Xu 已提交
3401 3402
                      parameters.downtime_limit,
                      DEFAULT_MIGRATE_SET_DOWNTIME),
3403
    DEFINE_PROP_UINT32("x-checkpoint-delay", MigrationState,
P
Peter Xu 已提交
3404 3405
                      parameters.x_checkpoint_delay,
                      DEFAULT_MIGRATE_X_CHECKPOINT_DELAY),
J
Juan Quintela 已提交
3406 3407
    DEFINE_PROP_UINT8("multifd-channels", MigrationState,
                      parameters.multifd_channels,
3408
                      DEFAULT_MIGRATE_MULTIFD_CHANNELS),
3409 3410 3411
    DEFINE_PROP_SIZE("xbzrle-cache-size", MigrationState,
                      parameters.xbzrle_cache_size,
                      DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE),
3412 3413 3414
    DEFINE_PROP_SIZE("max-postcopy-bandwidth", MigrationState,
                      parameters.max_postcopy_bandwidth,
                      DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH),
3415 3416 3417
    DEFINE_PROP_UINT8("max-cpu-throttle", MigrationState,
                      parameters.max_cpu_throttle,
                      DEFAULT_MIGRATE_MAX_CPU_THROTTLE),
3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429
    DEFINE_PROP_SIZE("announce-initial", MigrationState,
                      parameters.announce_initial,
                      DEFAULT_MIGRATE_ANNOUNCE_INITIAL),
    DEFINE_PROP_SIZE("announce-max", MigrationState,
                      parameters.announce_max,
                      DEFAULT_MIGRATE_ANNOUNCE_MAX),
    DEFINE_PROP_SIZE("announce-rounds", MigrationState,
                      parameters.announce_rounds,
                      DEFAULT_MIGRATE_ANNOUNCE_ROUNDS),
    DEFINE_PROP_SIZE("announce-step", MigrationState,
                      parameters.announce_step,
                      DEFAULT_MIGRATE_ANNOUNCE_STEP),
3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442

    /* Migration capabilities */
    DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE),
    DEFINE_PROP_MIG_CAP("x-rdma-pin-all", MIGRATION_CAPABILITY_RDMA_PIN_ALL),
    DEFINE_PROP_MIG_CAP("x-auto-converge", MIGRATION_CAPABILITY_AUTO_CONVERGE),
    DEFINE_PROP_MIG_CAP("x-zero-blocks", MIGRATION_CAPABILITY_ZERO_BLOCKS),
    DEFINE_PROP_MIG_CAP("x-compress", MIGRATION_CAPABILITY_COMPRESS),
    DEFINE_PROP_MIG_CAP("x-events", MIGRATION_CAPABILITY_EVENTS),
    DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM),
    DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO),
    DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM),
    DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK),
    DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH),
J
Juan Quintela 已提交
3443
    DEFINE_PROP_MIG_CAP("x-multifd", MIGRATION_CAPABILITY_MULTIFD),
3444

3445 3446 3447
    DEFINE_PROP_END_OF_LIST(),
};

3448 3449 3450 3451 3452
static void migration_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->user_creatable = false;
3453
    dc->props = migration_properties;
3454 3455
}

M
Marc-André Lureau 已提交
3456 3457 3458 3459 3460
static void migration_instance_finalize(Object *obj)
{
    MigrationState *ms = MIGRATION_OBJ(obj);
    MigrationParameters *params = &ms->parameters;

3461
    qemu_mutex_destroy(&ms->error_mutex);
3462
    qemu_mutex_destroy(&ms->qemu_file_lock);
M
Marc-André Lureau 已提交
3463 3464
    g_free(params->tls_hostname);
    g_free(params->tls_creds);
3465
    qemu_sem_destroy(&ms->rate_limit_sem);
3466
    qemu_sem_destroy(&ms->pause_sem);
3467
    qemu_sem_destroy(&ms->postcopy_pause_sem);
3468
    qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
3469
    qemu_sem_destroy(&ms->rp_state.rp_sem);
3470
    error_free(ms->error);
M
Marc-André Lureau 已提交
3471 3472
}

3473 3474 3475
static void migration_instance_init(Object *obj)
{
    MigrationState *ms = MIGRATION_OBJ(obj);
3476
    MigrationParameters *params = &ms->parameters;
3477 3478 3479

    ms->state = MIGRATION_STATUS_NONE;
    ms->mbps = -1;
3480
    ms->pages_per_second = -1;
3481
    qemu_sem_init(&ms->pause_sem, 0);
3482
    qemu_mutex_init(&ms->error_mutex);
3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496

    params->tls_hostname = g_strdup("");
    params->tls_creds = g_strdup("");

    /* Set has_* up only for parameter checks */
    params->has_compress_level = true;
    params->has_compress_threads = true;
    params->has_decompress_threads = true;
    params->has_cpu_throttle_initial = true;
    params->has_cpu_throttle_increment = true;
    params->has_max_bandwidth = true;
    params->has_downtime_limit = true;
    params->has_x_checkpoint_delay = true;
    params->has_block_incremental = true;
J
Juan Quintela 已提交
3497
    params->has_multifd_channels = true;
3498
    params->has_xbzrle_cache_size = true;
3499
    params->has_max_postcopy_bandwidth = true;
3500
    params->has_max_cpu_throttle = true;
3501 3502 3503 3504
    params->has_announce_initial = true;
    params->has_announce_max = true;
    params->has_announce_rounds = true;
    params->has_announce_step = true;
3505 3506

    qemu_sem_init(&ms->postcopy_pause_sem, 0);
3507
    qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
3508
    qemu_sem_init(&ms->rp_state.rp_sem, 0);
3509
    qemu_sem_init(&ms->rate_limit_sem, 0);
3510
    qemu_mutex_init(&ms->qemu_file_lock);
3511 3512 3513 3514 3515 3516 3517 3518
}

/*
 * Validate the migration object's parameters and the combination of
 * enabled capabilities (e.g. incompatible capability pairs).
 *
 * Return true if check pass, false otherwise. Error will be put
 * inside errp if provided.
 */
static bool migration_object_check(MigrationState *ms, Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    /* Assuming all off */
    bool cap_list[MIGRATION_CAPABILITY__MAX] = { 0 }, ret;
    int i;

    if (!migrate_params_check(&ms->parameters, errp)) {
        return false;
    }

    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (ms->enabled_capabilities[i]) {
            head = migrate_cap_add(head, i, true);
        }
    }

    ret = migrate_caps_check(cap_list, head, errp);

    /* It works with head == NULL */
    qapi_free_MigrationCapabilityStatusList(head);

    return ret;
}

static const TypeInfo migration_type = {
    .name = TYPE_MIGRATION,
3544
    /*
P
Peter Xu 已提交
3545 3546 3547 3548 3549 3550
     * NOTE: TYPE_MIGRATION is not really a device, as the object is
     * not created using qdev_create(), it is not attached to the qdev
     * device tree, and it is never realized.
     *
     * TODO: Make this TYPE_OBJECT once QOM provides something like
     * TYPE_DEVICE's "-global" properties.
3551
     */
3552 3553 3554 3555 3556
    .parent = TYPE_DEVICE,
    .class_init = migration_class_init,
    .class_size = sizeof(MigrationClass),
    .instance_size = sizeof(MigrationState),
    .instance_init = migration_instance_init,
M
Marc-André Lureau 已提交
3557
    .instance_finalize = migration_instance_finalize,
3558 3559 3560 3561 3562 3563 3564 3565
};

/* Register the TYPE_MIGRATION QOM type with the type system. */
static void register_migration_types(void)
{
    type_register_static(&migration_type);
}

/* NOTE(review): type_init() runs this during QEMU module initialisation */
type_init(register_migration_types);