/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
17
#include "qemu/cutils.h"
18
#include "qemu/error-report.h"
19
#include "migration/blocker.h"
20
#include "exec.h"
21
#include "fd.h"
22
#include "socket.h"
23
#include "rdma.h"
24
#include "ram.h"
J
Juan Quintela 已提交
25
#include "migration/global_state.h"
26
#include "migration/misc.h"
27
#include "migration.h"
28
#include "savevm.h"
29
#include "qemu-file-channel.h"
J
Juan Quintela 已提交
30
#include "qemu-file.h"
31
#include "migration/vmstate.h"
32
#include "block/block.h"
33
#include "qapi/qmp/qerror.h"
34
#include "qapi/util.h"
35
#include "qemu/rcu.h"
36
#include "block.h"
37
#include "postcopy-ram.h"
38
#include "qemu/thread.h"
L
Luiz Capitulino 已提交
39
#include "qmp-commands.h"
40
#include "trace.h"
41
#include "qapi-event.h"
42
#include "exec/target_page.h"
43
#include "io/channel-buffer.h"
44
#include "migration/colo.h"
45
#include "hw/boards.h"
P
Peter Xu 已提交
46
#include "monitor/monitor.h"
47

48
#define MAX_THROTTLE  (32 << 20)      /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
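
/* Worked example of the chunking above (illustrative, not a tunable):
 * later in this file the limit is applied as
 *     qemu_file_set_rate_limit(file, max_bandwidth / XFER_LIMIT_RATIO);
 * i.e. the byte budget per BUFFER_DELAY chunk.  With the default
 * MAX_THROTTLE of 32 MiB/s and BUFFER_DELAY of 100 ms, that budget is
 * 32 MiB / 10 = ~3.2 MiB per 100 ms chunk.
 */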

/* Time in milliseconds we are allowed to stop the source,
 * for sending the last part */
#define DEFAULT_MIGRATE_SET_DOWNTIME 300

/* Maximum migrate downtime set to 2000 seconds */
#define MAX_MIGRATE_DOWNTIME_SECONDS 2000
#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression. */
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ..., 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Define default autoconverge cpu throttle migration parameters */
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

/* The delay time (in ms) between two COLO checkpoints
 * Note: Please change this default value to 10000 when we support hybrid mode.
 */
#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY 200

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;

/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32 ) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */

    MIG_RP_MSG_MAX
};
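
/* For reference, migrate_send_rp_message() below frames each return-path
 * message on the wire as:
 *
 *     be16 message_type;   -- one of the MIG_RP_MSG_* values above
 *     be16 len;            -- payload length in bytes
 *     uint8_t data[len];   -- payload
 *
 * The rp_cmd_args[] table further down records the expected payload
 * length per type (-1 for variable-length messages).
 */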

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

static MigrationState *current_migration;

void migration_object_init(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());

    /* This can only be called once. */
    assert(!current_migration);
    current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));

    /*
     * We cannot really do this in migration_instance_init() since at
     * that time global properties are not yet applied, then this
     * value will be definitely replaced by something else.
     */
    if (ms->enforce_config_section) {
        current_migration->send_configuration = true;
    }
}

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    /* This can only be called after the object created. */
    assert(current_migration);
    return current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    static bool once;
    static MigrationIncomingState mis_current;

    if (!once) {
        /* Zero the struct before setting the initial state; doing the
         * memset afterwards would wipe the assignment. */
        memset(&mis_current, 0, sizeof(MigrationIncomingState));
        mis_current.state = MIGRATION_STATUS_NONE;
        qemu_mutex_init(&mis_current.rp_mutex);
        qemu_event_init(&mis_current.main_thread_load_event, false);
        once = true;
    }
    return &mis_current;
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->to_src_file) {
        /* Tell source that we are done */
        migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
        qemu_fclose(mis->to_src_file);
        mis->to_src_file = NULL;
    }

    if (mis->from_src_file) {
        qemu_fclose(mis->from_src_file);
        mis->from_src_file = NULL;
    }

    qemu_event_destroy(&mis->main_thread_load_event);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state, &error_abort);
    }
}

/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}

/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
static void migrate_send_rp_message(MigrationIncomingState *mis,
                                    enum mig_rp_message_type message_type,
                                    uint16_t len, void *data)
{
    trace_migrate_send_rp_message((int)message_type, len);
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);
    qemu_mutex_unlock(&mis->rp_mutex);
}

/* Request a range of pages from the source VM at the given
 * start address.
 *   rbname: Name of the RAMBlock to request the page in, if NULL it's the same
 *           as the last request (a name must have been given previously)
 *   start: Address offset within the RB
 *   len: Length in bytes required - must be a multiple of pagesize
 */
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                               ram_addr_t start, size_t len)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    if (rbname) {
        int rbname_len = strlen(rbname);
        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
    } else {
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
    }
}
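
/* Worked example (illustrative values): requesting 4 KiB at offset 0x2000
 * of a RAMBlock named "pc.ram" yields a MIG_RP_MSG_REQ_PAGES_ID payload of
 * 12 + 1 + 6 = 19 bytes:
 *
 *     bufc[0..7]   = 00 00 00 00 00 00 20 00    start, be64
 *     bufc[8..11]  = 00 00 10 00                len, be32
 *     bufc[12]     = 06                         strlen("pc.ram")
 *     bufc[13..18] = 'p' 'c' '.' 'r' 'a' 'm'    name, not NUL-terminated
 */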

void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}
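
/* Example URIs accepted by the dispatcher above (illustrative; rdma:
 * additionally requires a CONFIG_RDMA build):
 *
 *     -incoming tcp:0:4444          listen on port 4444, all interfaces
 *     -incoming unix:/tmp/mig.sock  listen on a unix domain socket
 *     -incoming fd:42               read the stream from inherited fd 42
 *     -incoming defer               wait for a later migrate_incoming command
 */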

static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* Make sure all file formats flush their mutable metadata.
     * If we get an error here, just don't restart the VM yet. */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        error_report_err(local_err);
        local_err = NULL;
        autostart = false;
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self();

    /* If global state section was not received or we are in running
       state, we need to obey autostart. Any other state is set with
       runstate_set. */

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else {
        runstate_set(global_state_get_runstate());
    }
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;

    mis->from_src_file = f;
    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(f);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    /* we get COLO info, and know if we are in COLO mode */
    if (!ret && migration_incoming_enable_colo()) {
        mis->migration_incoming_co = qemu_coroutine_self();
        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
             colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
        mis->have_colo_incoming_thread = true;
        qemu_coroutine_yield();

        /* Wait checkpoint incoming thread exit before free resource */
        qemu_thread_join(&mis->colo_incoming_thread);
    }

    if (ret < 0) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
        qemu_fclose(mis->from_src_file);
        exit(EXIT_FAILURE);
    }
    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
}

void migration_fd_process_incoming(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);

    qemu_file_set_blocking(f, false);
    qemu_coroutine_enter(co);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
        if (i == MIGRATION_CAPABILITY_BLOCK) {
            continue;
        }
#endif
        if (i == MIGRATION_CAPABILITY_X_COLO && !colo_supported()) {
            continue;
        }
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->has_compress_level = true;
    params->compress_level = s->parameters.compress_level;
    params->has_compress_threads = true;
    params->compress_threads = s->parameters.compress_threads;
    params->has_decompress_threads = true;
    params->decompress_threads = s->parameters.decompress_threads;
    params->has_cpu_throttle_initial = true;
    params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
    params->has_cpu_throttle_increment = true;
    params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
    params->has_tls_creds = !!s->parameters.tls_creds;
    params->tls_creds = g_strdup(s->parameters.tls_creds);
    params->has_tls_hostname = !!s->parameters.tls_hostname;
    params->tls_hostname = g_strdup(s->parameters.tls_hostname);
    params->has_max_bandwidth = true;
    params->max_bandwidth = s->parameters.max_bandwidth;
    params->has_downtime_limit = true;
    params->downtime_limit = s->parameters.downtime_limit;
    params->has_x_checkpoint_delay = true;
    params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;
    params->has_block_incremental = true;
    params->block_incremental = s->parameters.block_incremental;

    return params;
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
static bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_SETUP:
        return true;

    default:
        return false;

    }
}

static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    info->has_ram = true;
    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = ram_counters.transferred;
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = ram_counters.duplicate;
    /* legacy value.  It is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = ram_counters.normal;
    info->ram->normal_bytes = ram_counters.normal *
        qemu_target_page_size();
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count = ram_counters.dirty_sync_count;
    info->ram->postcopy_requests = ram_counters.postcopy_requests;
    info->ram->page_size = qemu_target_page_size();

    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_counters.bytes;
        info->xbzrle_cache->pages = xbzrle_counters.pages;
        info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
        info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
        info->xbzrle_cache->overflow = xbzrle_counters.overflow;
    }

    if (cpu_throttle_active()) {
        info->has_cpu_throttle_percentage = true;
        info->cpu_throttle_percentage = cpu_throttle_get_percentage();
    }

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate = ram_counters.dirty_pages_rate;
    }
}

static void populate_disk_info(MigrationInfo *info)
{
    if (blk_mig_active()) {
        info->has_disk = true;
        info->disk = g_malloc0(sizeof(*info->disk));
        info->disk->transferred = blk_mig_bytes_transferred();
        info->disk->remaining = blk_mig_bytes_remaining();
        info->disk->total = blk_mig_bytes_total();
    }
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
        /* TODO add some postcopy stats */
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);
        populate_disk_info(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->has_error_desc = true;
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;
    bool old_postcopy_cap = migrate_postcopy_ram();

    if (migration_is_setup_or_active(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
        if (cap->value->capability == MIGRATION_CAPABILITY_BLOCK
            && cap->value->state) {
            error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
                       "block migration");
            error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
            continue;
        }
#endif
        if (cap->value->capability == MIGRATION_CAPABILITY_X_COLO) {
            if (!colo_supported()) {
                error_setg(errp, "COLO is not currently supported, please"
                             " configure with --enable-colo option in order to"
                             " support COLO feature");
                continue;
            }
        }
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }

    if (migrate_postcopy_ram()) {
        if (migrate_use_compression()) {
            /* The decompression threads asynchronously write into RAM
             * rather than use the atomic copies needed to avoid
             * userfaulting.  It should be possible to fix the decompression
             * threads for compatibility in future.
             */
            error_report("Postcopy is not currently compatible with "
                         "compression");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
        /* This check is reasonably expensive, so only when it's being
         * set the first time, also it's only the destination that needs
         * special support.
         */
        if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host()) {
            /* postcopy_ram_supported_by_host will have emitted a more
             * detailed message
             */
            error_report("Postcopy is not supported");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
    }
}

void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (params->has_compress_level &&
        (params->compress_level < 0 || params->compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (params->has_compress_threads &&
        (params->compress_threads < 1 || params->compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (params->has_decompress_threads &&
        (params->decompress_threads < 1 || params->decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (params->has_cpu_throttle_initial &&
        (params->cpu_throttle_initial < 1 ||
         params->cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (params->has_cpu_throttle_increment &&
        (params->cpu_throttle_increment < 1 ||
         params->cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (params->has_max_bandwidth &&
        (params->max_bandwidth < 0 || params->max_bandwidth > SIZE_MAX)) {
        error_setg(errp, "Parameter 'max_bandwidth' expects an integer in the"
                         " range of 0 to %zu bytes/second", SIZE_MAX);
        return;
    }
    if (params->has_downtime_limit &&
        (params->downtime_limit < 0 ||
         params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d milliseconds",
                         MAX_MIGRATE_DOWNTIME);
        return;
    }
    if (params->has_x_checkpoint_delay && (params->x_checkpoint_delay < 0)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                    "x_checkpoint_delay",
                    "is invalid, it should be positive");
        return;
    }

    if (params->has_compress_level) {
        s->parameters.compress_level = params->compress_level;
    }
    if (params->has_compress_threads) {
        s->parameters.compress_threads = params->compress_threads;
    }
    if (params->has_decompress_threads) {
        s->parameters.decompress_threads = params->decompress_threads;
    }
    if (params->has_cpu_throttle_initial) {
        s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
    }
    if (params->has_cpu_throttle_increment) {
        s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
    }
    if (params->has_tls_creds) {
        g_free(s->parameters.tls_creds);
        s->parameters.tls_creds = g_strdup(params->tls_creds);
    }
    if (params->has_tls_hostname) {
        g_free(s->parameters.tls_hostname);
        s->parameters.tls_hostname = g_strdup(params->tls_hostname);
    }
    if (params->has_max_bandwidth) {
        s->parameters.max_bandwidth = params->max_bandwidth;
        if (s->to_dst_file) {
            qemu_file_set_rate_limit(s->to_dst_file,
                                s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
        }
    }
    if (params->has_downtime_limit) {
        s->parameters.downtime_limit = params->downtime_limit;
    }

    if (params->has_x_checkpoint_delay) {
        s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
        if (migration_in_colo_state()) {
            colo_checkpoint_notify(s);
        }
    }
    if (params->has_block_incremental) {
        s->parameters.block_incremental = params->block_incremental;
    }
}


void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy_ram()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    atomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

void migrate_set_state(int *state, int old_state, int new_state)
{
    if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
        migrate_generate_event(new_state);
    }
}
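
/* Note on migrate_set_state() above: the compare-and-swap means the
 * transition (and its QMP event) fires only if *state still equals
 * old_state at the point of the call, so a racing transition - e.g. a
 * concurrent move to CANCELLING - cannot be overwritten or reported twice.
 */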

void migrate_set_block_enabled(bool value, Error **errp)
{
    MigrationCapabilityStatusList *cap;

    cap = g_new0(MigrationCapabilityStatusList, 1);
    cap->value = g_new0(MigrationCapabilityStatus, 1);
    cap->value->capability = MIGRATION_CAPABILITY_BLOCK;
    cap->value->state = value;
    qmp_migrate_set_capabilities(cap, errp);
    qapi_free_MigrationCapabilityStatusList(cap);
}

static void migrate_set_block_incremental(MigrationState *s, bool value)
{
    s->parameters.block_incremental = value;
}

static void block_cleanup_parameters(MigrationState *s)
{
    if (s->must_remove_block_options) {
        /* setting to false can never fail */
        migrate_set_block_enabled(false, &error_abort);
        migrate_set_block_incremental(s, false);
        s->must_remove_block_options = false;
    }
}

static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    if (s->to_dst_file) {
        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        qemu_fclose(s->to_dst_file);
        s->to_dst_file = NULL;
    }

    assert((s->state != MIGRATION_STATUS_ACTIVE) &&
           (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    notifier_list_notify(&migration_state_notifiers, s);
    block_cleanup_parameters(s);
}

void migrate_fd_error(MigrationState *s, const Error *error)
{
    trace_migrate_fd_error(error_get_pretty(error));
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    if (!s->error) {
        s->error = error_copy(error);
    }
    notifier_list_notify(&migration_state_notifiers, s);
    block_cleanup_parameters(s);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;
    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* shutdown the rp socket, so causing the rp thread to shutdown */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

    do {
        old_state = s->state;
        if (!migration_is_setup_or_active(old_state)) {
            break;
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
    }
    block_cleanup_parameters(s);
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(void)
{
    MigrationState *s = migrate_get_current();

    return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

bool migration_in_postcopy_after_devices(MigrationState *s)
{
    return migration_in_postcopy() && s->postcopy_after_devices;
}

bool migration_is_idle(void)
{
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_COMPLETED:
    case MIGRATION_STATUS_FAILED:
        return true;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_COLO:
        return false;
    case MIGRATION_STATUS__MAX:
        g_assert_not_reached();
    }

    return false;
}

MigrationState *migrate_init(void)
{
    MigrationState *s = migrate_get_current();

    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->bytes_xfer = 0;
    s->xfer_limit = 0;
    s->cleanup_bh = 0;
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->setup_time = 0;
    s->start_postcopy = false;
    s->postcopy_after_devices = false;
    s->migration_thread_running = false;
    error_free(s->error);
    s->error = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    return s;
}

static GSList *migration_blockers;

int migrate_add_blocker(Error *reason, Error **errp)
{
    if (migrate_get_current()->only_migratable) {
        error_propagate(errp, error_copy(reason));
        error_prepend(errp, "disallowing migration blocker "
                          "(--only_migratable) for: ");
        return -EACCES;
    }

    if (migration_is_idle()) {
        migration_blockers = g_slist_prepend(migration_blockers, reason);
        return 0;
    }

    error_propagate(errp, error_copy(reason));
    error_prepend(errp, "disallowing migration blocker (migration in "
                      "progress) for: ");
    return -EBUSY;
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}
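
/* Typical caller pattern for the blocker API above (hypothetical device
 * "foo"; the real callers live in device and subsystem code):
 *
 *     Error *blocker = NULL;
 *     error_setg(&blocker, "Device 'foo' does not support migration");
 *     if (migrate_add_blocker(blocker, &local_err) < 0) {
 *         error_free(blocker);    // rejected, e.g. migration in progress
 *     }
 *     ...
 *     migrate_del_blocker(blocker);   // on unrealize/unplug
 *     error_free(blocker);
 */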

void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

bool migration_is_blocked(Error **errp)
{
    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (migration_blockers) {
        error_propagate(errp, error_copy(migration_blockers->data));
        return true;
    }

    return false;
}

void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    const char *p;

    if (migration_is_setup_or_active(s->state) ||
        s->state == MIGRATION_STATUS_CANCELLING ||
        s->state == MIGRATION_STATUS_COLO) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (migration_is_blocked(errp)) {
        return;
    }

    if ((has_blk && blk) || (has_inc && inc)) {
        if (migrate_use_block() || migrate_use_block_incremental()) {
            error_setg(errp, "Command options are incompatible with "
                       "current migration capabilities");
            return;
        }
        migrate_set_block_enabled(true, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        s->must_remove_block_options = true;
    }

    if (has_inc && inc) {
        migrate_set_block_incremental(s, true);
    }

    s = migrate_init();

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        return;
    }

    if (local_err) {
        migrate_fd_error(s, local_err);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size ");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationParameters p = {
        .has_max_bandwidth = true,
        .max_bandwidth = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
    if (value < 0 || value > MAX_MIGRATE_DOWNTIME_SECONDS) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d seconds",
                         MAX_MIGRATE_DOWNTIME_SECONDS);
        return;
    }

    value *= 1000; /* Convert to milliseconds */
    value = MAX(0, MIN(INT64_MAX, value));

    MigrationParameters p = {
        .has_downtime_limit = true,
        .downtime_limit = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}
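
/* Worked example for the conversion above: qmp_migrate_set_downtime(0.3, ...)
 * multiplies by 1000 and ends up storing downtime_limit = 300 (milliseconds)
 * via qmp_migrate_set_parameters().
 */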

bool migrate_release_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
}

bool migrate_postcopy_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
}

bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_level;
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_threads;
}

int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.decompress_threads;
}

bool migrate_use_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

bool migrate_use_block(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_BLOCK];
}

bool migrate_use_return_path(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_RETURN_PATH];
}

bool migrate_use_block_incremental(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.block_incremental;
}

/* migration thread support */
/*
 * Something bad happened to the RP stream, mark an error
 * The caller shall print or trace something to indicate why
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}

static struct rp_cmd_args {
    ssize_t     len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};

/*
 * Process a request for pages received on the return path,
 * We're allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
                                       ram_addr_t start, size_t len)
{
    long our_host_ps = getpagesize();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (start & (our_host_ps-1) ||
       (len & (our_host_ps-1))) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

/*
 * Handles messages sent on the return path towards the source VM
 *
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    uint8_t buf[512];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t  len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                    header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
            header_len != rp_cmd_args[header_type].len) ||
            header_len > sizeof(buf)) {
            error_report("RP: Received '%s' message (0x%04x) with"
                    " incorrect length %d expecting %zu",
                    rp_cmd_args[header_type].name, header_type, header_len,
                    (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = ldl_be_p(buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = ldl_be_p(buf);
            trace_source_return_path_thread_pong(tmp32);
            break;

        case MIG_RP_MSG_REQ_PAGES:
            start = ldq_be_p(buf);
            len = ldl_be_p(buf + 8);
            migrate_handle_rp_req_pages(ms, NULL, start, len);
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
            expected_len = 12 + 1; /* header + termination */

            if (header_len >= expected_len) {
                start = ldq_be_p(buf);
                len = ldl_be_p(buf + 8);
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_report("RP: Req_Page_id with length %d expecting %zd",
                        header_len, expected_len);
                mark_source_rp_bad(ms);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
            break;

        default:
            break;
        }
    }
    if (qemu_file_get_error(rp)) {
        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
out:
    ms->rp_state.from_dst_file = NULL;
    qemu_fclose(rp);
    return NULL;
}

static int open_return_path_on_source(MigrationState *ms)
{

    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
    if (!ms->rp_state.from_dst_file) {
        return -1;
    }

    trace_open_return_path_on_source();
    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);

    trace_open_return_path_on_source_continue();

    return 0;
}

/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    }
    trace_await_return_path_close_on_source_joining();
    qemu_thread_join(&ms->rp_state.rp_thread);
    trace_await_return_path_close_on_source_close();
    return ms->rp_state.error;
}

/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
static int postcopy_start(MigrationState *ms, bool *old_vm_running)
{
    int ret;
    QIOChannelBuffer *bioc;
    QEMUFile *fb;
    int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    bool restart_block = false;
    migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    trace_postcopy_start();
    qemu_mutex_lock_iothread();
    trace_postcopy_start_set_run();

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
    *old_vm_running = runstate_is_running();
    global_state_store();
    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_inactivate_all();
    if (ret < 0) {
        goto fail;
    }
    restart_block = true;

    /*
     * Cause any non-postcopiable, but iterative devices to
     * send out their final data.
     */
    qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);

    /*
     * in Finish migrate and with the io-lock held everything should
     * be quiet, but we've potentially still got dirty pages and we
     * need to tell the destination to throw any pages it's already received
     * that are dirty
     */
    if (ram_postcopy_send_discard_bitmap(ms)) {
        error_report("postcopy send discard bitmap failed");
        goto fail;
    }

    /*
     * send rest of state - note things that are doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here
     */
    qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
    /* Ping just for debugging, helps line traces up */
    qemu_savevm_send_ping(ms->to_dst_file, 2);

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it.  Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each device's load-state code (especially the open
     * coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use a qemu_buf to hold the whole of the device state.
     */
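    /*
     * On the wire the package is, roughly, a MIG_CMD_PACKAGED command
     * carrying a be32 length, followed by the buffered device state
     * verbatim (see qemu_savevm_send_packaged(); a sketch, not a full
     * format description).
     */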
    bioc = qio_channel_buffer_new(4096);
    qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
    fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc));
    object_unref(OBJECT(bioc));
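    /* fb now holds its own reference to bioc, so dropping ours ties the
     * buffer's lifetime to the QEMUFile. */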

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb, false, false);
    qemu_savevm_send_ping(fb, 3);

    qemu_savevm_send_postcopy_run(fb);

    /* <><> end of stuff going into the package */

    /* Last point of recovery; as soon as we send the package the destination
     * can open devices and potentially start running.
     * Let's just check again that we've not got any errors.
     */
    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored (pre package)");
        goto fail_closefb;
    }

    restart_block = false;

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
        goto fail_closefb;
    }
    qemu_fclose(fb);

    /* Notify listeners so that anything that needs to happen at the
     * transition to postcopy, after the device state has been sent, gets
     * a chance to run; in particular SPICE needs to trigger a transition now.
     */
    ms->postcopy_after_devices = true;
    notifier_list_notify(&migration_state_notifiers, ms);

    ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;

    qemu_mutex_unlock_iothread();

    /*
     * Although this ping is just for debug, it could potentially be
     * used for getting a better measurement of downtime at the source.
     */
    qemu_savevm_send_ping(ms->to_dst_file, 4);

    if (migrate_release_ram()) {
        ram_postcopy_migrated_memory_release(ms);
    }

    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored");
        migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                          MIGRATION_STATUS_FAILED);
    }

    return ret;

fail_closefb:
    qemu_fclose(fb);
fail:
    migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    if (restart_block) {
        /* A failure happened early enough that we know the destination hasn't
         * accessed block devices, so we're safe to recover.
         */
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
    qemu_mutex_unlock_iothread();
    return -1;
}

/**
 * migration_completion: Used by migration_thread when there's not much left.
 *   The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 * @current_active_state: The migration state we expect to be in
 * @*old_vm_running: Pointer to old_vm_running flag
 * @*start_time: Pointer to time to update
 */
static void migration_completion(MigrationState *s, int current_active_state,
                                 bool *old_vm_running,
                                 int64_t *start_time)
{
    int ret;

    if (s->state == MIGRATION_STATUS_ACTIVE) {
        qemu_mutex_lock_iothread();
        *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
        *old_vm_running = runstate_is_running();
        ret = global_state_store();

        if (!ret) {
            bool inactivate = !migrate_colo_enabled();
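            /* Under COLO the source may keep running after migration, so
             * its block devices must stay active; otherwise they are
             * inactivated below to hand ownership to the destination. */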
            ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
            if (ret >= 0) {
                qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
                ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
                                                         inactivate);
            }
            if (inactivate && ret >= 0) {
                s->block_inactive = true;
            }
        }
        qemu_mutex_unlock_iothread();

        if (ret < 0) {
            goto fail;
        }
    } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        trace_migration_completion_postcopy_end();

        qemu_savevm_state_complete_postcopy(s->to_dst_file);
        trace_migration_completion_postcopy_end_after_complete();
    }

    /*
     * If the return path was opened we must clean up its thread before
     * cleaning everything else up (since if there are no failures
     * it will wait for the destination to send its status in
     * a SHUT command).
     */
    if (s->rp_state.from_dst_file) {
        int rp_error;
        trace_migration_return_path_end_before();
        rp_error = await_return_path_close_on_source(s);
        trace_migration_return_path_end_after(rp_error);
        if (rp_error) {
            goto fail_invalidate;
        }
    }

    if (qemu_file_get_error(s->to_dst_file)) {
        trace_migration_completion_file_err();
        goto fail_invalidate;
    }

    if (!migrate_colo_enabled()) {
        migrate_set_state(&s->state, current_active_state,
                          MIGRATION_STATUS_COMPLETED);
    }

    return;

fail_invalidate:
    /* If not doing postcopy, vm_start() will be called: let's regain
     * control of the images.
     */
    if (s->state == MIGRATION_STATUS_ACTIVE) {
        Error *local_err = NULL;

        qemu_mutex_lock_iothread();
        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
        qemu_mutex_unlock_iothread();
    }

fail:
    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_FAILED);
}

bool migrate_colo_enabled(void)
{
    MigrationState *s = migrate_get_current();
    return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO];
}

/*
 * Master migration thread on the source VM.
 * It drives the migration and pumps the data down the outgoing channel.
 */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    /* Used by the bandwidth calcs, updated later */
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    /*
     * The final stage happens when the remaining data is smaller than
     * this threshold; it's calculated from the requested downtime and
     * measured bandwidth
     */
    int64_t threshold_size = 0;
    int64_t start_time = initial_time;
    int64_t end_time;
    bool old_vm_running = false;
    bool entered_postcopy = false;
    /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
    enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
    bool enable_colo = migrate_colo_enabled();

    rcu_register_thread();

    qemu_savevm_state_header(s->to_dst_file);

    /*
     * If we opened the return path, we need to make sure dst has it
     * opened as well.
     */
    if (s->rp_state.from_dst_file) {
        /* Now tell the dest that it should open its end so it can reply */
        qemu_savevm_send_open_return_path(s->to_dst_file);

        /* And do a ping that will make stuff easier to debug */
        qemu_savevm_send_ping(s->to_dst_file, 1);
    }

    if (migrate_postcopy_ram()) {
        /*
         * Tell the destination that we *might* want to do postcopy later;
         * if the other end can't do postcopy it should fail now, nice and
         * early.
         */
        qemu_savevm_send_postcopy_advise(s->to_dst_file);
    }

    qemu_savevm_state_setup(s->to_dst_file);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_ACTIVE);

    trace_migration_thread_setup_complete();

    while (s->state == MIGRATION_STATUS_ACTIVE ||
           s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->to_dst_file)) {
            uint64_t pend_post, pend_nonpost;

            qemu_savevm_state_pending(s->to_dst_file, threshold_size,
                                      &pend_nonpost, &pend_post);
            pending_size = pend_nonpost + pend_post;
            trace_migrate_pending(pending_size, threshold_size,
                                  pend_post, pend_nonpost);
            if (pending_size && pending_size >= threshold_size) {
                /* Still a significant amount to transfer */
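                /* Postcopy is entered only once the user has issued
                 * migrate-start-postcopy and the non-postcopiable device
                 * state already fits within the downtime budget. */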

                if (migrate_postcopy_ram() &&
                    s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
                    pend_nonpost <= threshold_size &&
                    atomic_read(&s->start_postcopy)) {

                    if (!postcopy_start(s, &old_vm_running)) {
                        current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
                        entered_postcopy = true;
                    }

                    continue;
                }
                /* Just another iteration step */
                qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
            } else {
                trace_migration_thread_low_pending(pending_size);
                migration_completion(s, current_active_state,
                                     &old_vm_running, &start_time);
                break;
            }
        }

        if (qemu_file_get_error(s->to_dst_file)) {
            migrate_set_state(&s->state, current_active_state,
                              MIGRATION_STATUS_FAILED);
            trace_migration_thread_file_err();
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
                                         initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = (double)transferred_bytes / time_spent;
            threshold_size = bandwidth * s->parameters.downtime_limit;
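            /*
             * Worked example (sketch): at a measured 100 MB/s, 'bandwidth'
             * is 100000 bytes/ms, so the default 300 ms downtime_limit
             * allows ~30 MB of outstanding data before completion starts.
             */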

            s->mbps = (((double) transferred_bytes * 8.0) /
                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, threshold_size);
            /* If we haven't sent anything, we don't want to recalculate;
               10000 bytes is a small enough number for our purposes */
            if (ram_counters.dirty_pages_rate && transferred_bytes > 10000) {
                s->expected_downtime = ram_counters.dirty_pages_rate *
                    qemu_target_page_size() / bandwidth;
            }
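            /*
             * Example (sketch): 10000 dirty pages/s at a 4 KiB target page
             * size is ~41 MB/s of re-dirtying; against 100000 bytes/ms of
             * bandwidth that predicts an expected_downtime of ~410 ms.
             */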

            qemu_file_reset_rate_limit(s->to_dst_file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->to_dst_file);
        }
        if (qemu_file_rate_limit(s->to_dst_file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
        }
    }

    trace_migration_thread_after_loop();
    /* If we enabled cpu throttling for auto-converge, turn it off. */
    cpu_throttle_stop();
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    qemu_mutex_lock_iothread();
    /*
     * Resources allocated by migration will be reused by the COLO
     * process, so don't release them here.
     */
    if (!enable_colo) {
        qemu_savevm_state_cleanup();
    }
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
        s->total_time = end_time - s->total_time;
        if (!entered_postcopy) {
            s->downtime = end_time - start_time;
        }
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        if (s->state == MIGRATION_STATUS_ACTIVE && enable_colo) {
            migrate_start_colo_process(s);
            qemu_savevm_state_cleanup();
            /*
             * FIXME: we will run the VM in COLO no matter what its old
             * running state was; after exiting COLO, we keep running.
             */
            old_vm_running = true;
        }
        if (old_vm_running && !entered_postcopy) {
            vm_start();
        } else {
            if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
                runstate_set(RUN_STATE_POSTMIGRATE);
            }
        }
    }
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    rcu_unregister_thread();
    return NULL;
}

void migrate_fd_connect(MigrationState *s)
{
    s->expected_downtime = s->parameters.downtime_limit;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_blocking(s->to_dst_file, true);
    qemu_file_set_rate_limit(s->to_dst_file,
                             s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
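    /*
     * XFER_LIMIT_RATIO turns bytes/s into a per-BUFFER_DELAY quota: with
     * BUFFER_DELAY at 100 ms, the thread may send max_bandwidth / 10
     * bytes per window before sleeping out the remainder.
     */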

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    /*
     * Open the return path. Postcopy always uses it; precopy uses it
     * only when the user has enabled the "return-path" capability.
     */
    if (migrate_postcopy_ram() || migrate_use_return_path()) {
        if (open_return_path_on_source(s)) {
            error_report("Unable to open return-path for postcopy");
            migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                              MIGRATION_STATUS_FAILED);
            migrate_fd_cleanup(s);
            return;
        }
    }

    qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
    s->migration_thread_running = true;
}

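/* Called from HMP "info migrate" to show the migration globals. */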
void migration_global_dump(Monitor *mon)
{
    MigrationState *ms = migrate_get_current();

    monitor_printf(mon, "globals: store-global-state=%d, only_migratable=%d, "
                   "send-configuration=%d, send-section-footer=%d\n",
                   ms->store_global_state, ms->only_migratable,
                   ms->send_configuration, ms->send_section_footer);
}

static Property migration_properties[] = {
    DEFINE_PROP_BOOL("store-global-state", MigrationState,
                     store_global_state, true),
    DEFINE_PROP_BOOL("only-migratable", MigrationState, only_migratable, false),
    DEFINE_PROP_BOOL("send-configuration", MigrationState,
                     send_configuration, true),
    DEFINE_PROP_BOOL("send-section-footer", MigrationState,
                     send_section_footer, true),

    /* Migration parameters */
    DEFINE_PROP_INT64("x-compress-level", MigrationState,
                      parameters.compress_level,
                      DEFAULT_MIGRATE_COMPRESS_LEVEL),
    DEFINE_PROP_INT64("x-compress-threads", MigrationState,
                      parameters.compress_threads,
                      DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT),
    DEFINE_PROP_INT64("x-decompress-threads", MigrationState,
                      parameters.decompress_threads,
                      DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
    DEFINE_PROP_INT64("x-cpu-throttle-initial", MigrationState,
                      parameters.cpu_throttle_initial,
                      DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL),
    DEFINE_PROP_INT64("x-cpu-throttle-increment", MigrationState,
                      parameters.cpu_throttle_increment,
                      DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT),
    DEFINE_PROP_INT64("x-max-bandwidth", MigrationState,
                      parameters.max_bandwidth, MAX_THROTTLE),
    DEFINE_PROP_INT64("x-downtime-limit", MigrationState,
                      parameters.downtime_limit,
                      DEFAULT_MIGRATE_SET_DOWNTIME),
    DEFINE_PROP_INT64("x-checkpoint-delay", MigrationState,
                      parameters.x_checkpoint_delay,
                      DEFAULT_MIGRATE_X_CHECKPOINT_DELAY),
    DEFINE_PROP_END_OF_LIST(),
};
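
/*
 * Being qdev properties, these can be set from the command line, e.g.
 * (illustrative): -global migration.x-downtime-limit=600
 */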

static void migration_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->user_creatable = false;
    dc->props = migration_properties;
}

static void migration_instance_init(Object *obj)
{
    MigrationState *ms = MIGRATION_OBJ(obj);

    ms->state = MIGRATION_STATUS_NONE;
    ms->xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE;
    ms->mbps = -1;
    ms->parameters.tls_creds = g_strdup("");
    ms->parameters.tls_hostname = g_strdup("");
}

static const TypeInfo migration_type = {
    .name = TYPE_MIGRATION,
    /*
     * NOTE: TYPE_MIGRATION is not really a device, as the object is
     * not created using qdev_create(), it is not attached to the qdev
     * device tree, and it is never realized.
     *
     * TODO: Make this TYPE_OBJECT once QOM provides something like
     * TYPE_DEVICE's "-global" properties.
     */
    .parent = TYPE_DEVICE,
    .class_init = migration_class_init,
    .class_size = sizeof(MigrationClass),
    .instance_size = sizeof(MigrationState),
    .instance_init = migration_instance_init,
};

static void register_migration_types(void)
{
    type_register_static(&migration_type);
}

type_init(register_migration_types);