/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "migration/blocker.h"
#include "exec.h"
#include "fd.h"
#include "socket.h"
#include "rdma.h"
#include "ram.h"
#include "migration/global_state.h"
#include "migration/misc.h"
#include "migration.h"
#include "savevm.h"
#include "qemu-file-channel.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qapi/util.h"
#include "qemu/rcu.h"
#include "block.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
#include "qapi-event.h"
#include "exec/target_page.h"
#include "io/channel-buffer.h"
#include "migration/colo.h"
#include "hw/boards.h"
#include "monitor/monitor.h"

#define MAX_THROTTLE  (32 << 20)      /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
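/*
 * Note: the rate limit applied to the outgoing QEMUFile is expressed per
 * BUFFER_DELAY interval: migrate_params_apply() below passes
 * max_bandwidth / XFER_LIMIT_RATIO to qemu_file_set_rate_limit().
 */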

/* Time in milliseconds we are allowed to stop the source,
 * for sending the last part */
#define DEFAULT_MIGRATE_SET_DOWNTIME 300

/* Maximum migrate downtime set to 2000 seconds */
#define MAX_MIGRATE_DOWNTIME_SECONDS 2000
#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression.*/
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ... 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Define default autoconverge cpu throttle migration parameters */
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

/* The delay time (in ms) between two COLO checkpoints
 * Note: Please change this default value to 10000 when we support hybrid mode.
 */
#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY 200

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;

/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32 ) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */

    MIG_RP_MSG_MAX
};
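
/*
 * On the wire each return-path message is framed as a be16 message type
 * (one of the values above), a be16 payload length, and then the payload
 * bytes laid out as described per message; see migrate_send_rp_message()
 * for the sender side and source_return_path_thread() for the parser.
 */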

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

static MigrationState *current_migration;

void migration_object_init(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());

    /* This can only be called once. */
    assert(!current_migration);
    current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));

    /*
     * We cannot really do this in migration_instance_init() since at
     * that time global properties are not yet applied, then this
     * value will be definitely replaced by something else.
     */
    if (ms->enforce_config_section) {
        current_migration->send_configuration = true;
    }
}

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    /* This can only be called after the object is created. */
    assert(current_migration);
    return current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    static bool once;
    static MigrationIncomingState mis_current;

    if (!once) {
        mis_current.state = MIGRATION_STATUS_NONE;
        memset(&mis_current, 0, sizeof(MigrationIncomingState));
        qemu_mutex_init(&mis_current.rp_mutex);
        qemu_event_init(&mis_current.main_thread_load_event, false);
        once = true;
    }
    return &mis_current;
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->to_src_file) {
        /* Tell source that we are done */
        migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
        qemu_fclose(mis->to_src_file);
        mis->to_src_file = NULL;
    }

    if (mis->from_src_file) {
        qemu_fclose(mis->from_src_file);
        mis->from_src_file = NULL;
    }

    qemu_event_destroy(&mis->main_thread_load_event);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state, &error_abort);
    }
}

/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}

/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
static void migrate_send_rp_message(MigrationIncomingState *mis,
                                    enum mig_rp_message_type message_type,
                                    uint16_t len, void *data)
{
    trace_migrate_send_rp_message((int)message_type, len);
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);
    qemu_mutex_unlock(&mis->rp_mutex);
}

/* Request a range of pages from the source VM at the given
 * start address.
 *   rbname: Name of the RAMBlock to request the page in, if NULL it's the same
 *           as the last request (a name must have been given previously)
 *   Start: Address offset within the RB
 *   Len: Length in bytes required - must be a multiple of pagesize
 */
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                               ram_addr_t start, size_t len)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    if (rbname) {
        int rbname_len = strlen(rbname);
        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
    } else {
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
    }
}

void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* Make sure all file formats flush their mutable metadata.
     * If we get an error here, just don't restart the VM yet. */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        error_report_err(local_err);
        local_err = NULL;
        autostart = false;
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self();

    /* If global state section was not received or we are in running
       state, we need to obey autostart. Any other state is set with
       runstate_set. */

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else {
        runstate_set(global_state_get_runstate());
    }
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;

    mis->from_src_file = f;
    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(f);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    /* we get COLO info, and know if we are in COLO mode */
    if (!ret && migration_incoming_enable_colo()) {
        mis->migration_incoming_co = qemu_coroutine_self();
        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
             colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
        mis->have_colo_incoming_thread = true;
        qemu_coroutine_yield();

        /* Wait checkpoint incoming thread exit before free resource */
        qemu_thread_join(&mis->colo_incoming_thread);
    }

    if (ret < 0) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
        qemu_fclose(mis->from_src_file);
        exit(EXIT_FAILURE);
    }
    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
}

void migration_fd_process_incoming(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);

    qemu_file_set_blocking(f, false);
    qemu_coroutine_enter(co);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
        if (i == MIGRATION_CAPABILITY_BLOCK) {
            continue;
        }
#endif
        if (i == MIGRATION_CAPABILITY_X_COLO && !colo_supported()) {
            continue;
        }
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->has_compress_level = true;
    params->compress_level = s->parameters.compress_level;
    params->has_compress_threads = true;
    params->compress_threads = s->parameters.compress_threads;
    params->has_decompress_threads = true;
    params->decompress_threads = s->parameters.decompress_threads;
    params->has_cpu_throttle_initial = true;
    params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
    params->has_cpu_throttle_increment = true;
    params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
    params->has_tls_creds = !!s->parameters.tls_creds;
    params->tls_creds = g_strdup(s->parameters.tls_creds);
    params->has_tls_hostname = !!s->parameters.tls_hostname;
    params->tls_hostname = g_strdup(s->parameters.tls_hostname);
    params->has_max_bandwidth = true;
    params->max_bandwidth = s->parameters.max_bandwidth;
    params->has_downtime_limit = true;
    params->downtime_limit = s->parameters.downtime_limit;
    params->has_x_checkpoint_delay = true;
    params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;
    params->has_block_incremental = true;
    params->block_incremental = s->parameters.block_incremental;

    return params;
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
static bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_SETUP:
        return true;

    default:
        return false;

    }
}

static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    info->has_ram = true;
    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = ram_counters.transferred;
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = ram_counters.duplicate;
    /* legacy value.  It is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = ram_counters.normal;
    info->ram->normal_bytes = ram_counters.normal *
        qemu_target_page_size();
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count = ram_counters.dirty_sync_count;
    info->ram->postcopy_requests = ram_counters.postcopy_requests;
    info->ram->page_size = qemu_target_page_size();

    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_counters.bytes;
        info->xbzrle_cache->pages = xbzrle_counters.pages;
        info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
        info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
        info->xbzrle_cache->overflow = xbzrle_counters.overflow;
    }

    if (cpu_throttle_active()) {
        info->has_cpu_throttle_percentage = true;
        info->cpu_throttle_percentage = cpu_throttle_get_percentage();
    }

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate = ram_counters.dirty_pages_rate;
    }
}

static void populate_disk_info(MigrationInfo *info)
{
    if (blk_mig_active()) {
        info->has_disk = true;
        info->disk = g_malloc0(sizeof(*info->disk));
        info->disk->transferred = blk_mig_bytes_transferred();
        info->disk->remaining = blk_mig_bytes_remaining();
        info->disk->total = blk_mig_bytes_total();
    }
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
        /* TODO add some postcopy stats */
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);
        populate_disk_info(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->has_error_desc = true;
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;
    bool old_postcopy_cap = migrate_postcopy_ram();

    if (migration_is_setup_or_active(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
        if (cap->value->capability == MIGRATION_CAPABILITY_BLOCK
            && cap->value->state) {
            error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
                       "block migration");
            error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
            continue;
        }
#endif
        if (cap->value->capability == MIGRATION_CAPABILITY_X_COLO) {
            if (!colo_supported()) {
                error_setg(errp, "COLO is not currently supported, please"
                             " configure with --enable-colo option in order to"
                             " support COLO feature");
                continue;
            }
        }
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }

    if (migrate_postcopy_ram()) {
        if (migrate_use_compression()) {
            /* The decompression threads asynchronously write into RAM
             * rather than use the atomic copies needed to avoid
             * userfaulting.  It should be possible to fix the decompression
             * threads for compatibility in future.
             */
            error_report("Postcopy is not currently compatible with "
                         "compression");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
        /* This check is reasonably expensive, so only when it's being
         * set the first time, also it's only the destination that needs
         * special support.
         */
        if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host()) {
            /* postcopy_ram_supported_by_host will have emitted a more
             * detailed message
             */
            error_report("Postcopy is not supported");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
    }
}

/*
 * Check whether the parameters are valid. Error will be put into errp
 * (if provided). Return true if valid, otherwise false.
 */
static bool migrate_params_check(MigrationParameters *params, Error **errp)
{
    if (params->has_compress_level &&
        (params->compress_level < 0 || params->compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return false;
    }

    if (params->has_compress_threads &&
        (params->compress_threads < 1 || params->compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return false;
    }

    if (params->has_decompress_threads &&
        (params->decompress_threads < 1 || params->decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return false;
    }

    if (params->has_cpu_throttle_initial &&
        (params->cpu_throttle_initial < 1 ||
         params->cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
        return false;
    }

    if (params->has_cpu_throttle_increment &&
        (params->cpu_throttle_increment < 1 ||
         params->cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
        return false;
    }

    if (params->has_max_bandwidth &&
        (params->max_bandwidth < 0 || params->max_bandwidth > SIZE_MAX)) {
        error_setg(errp, "Parameter 'max_bandwidth' expects an integer in the"
                         " range of 0 to %zu bytes/second", SIZE_MAX);
        return false;
    }

    if (params->has_downtime_limit &&
        (params->downtime_limit < 0 ||
         params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d milliseconds",
                         MAX_MIGRATE_DOWNTIME);
        return false;
    }

    if (params->has_x_checkpoint_delay && (params->x_checkpoint_delay < 0)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                    "x_checkpoint_delay",
                    "is invalid, it should be positive");
        return false;
    }

    return true;
}

static void migrate_params_apply(MigrationParameters *params)
{
    MigrationState *s = migrate_get_current();

    if (params->has_compress_level) {
        s->parameters.compress_level = params->compress_level;
    }

    if (params->has_compress_threads) {
        s->parameters.compress_threads = params->compress_threads;
    }

    if (params->has_decompress_threads) {
        s->parameters.decompress_threads = params->decompress_threads;
    }

    if (params->has_cpu_throttle_initial) {
        s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
    }

    if (params->has_cpu_throttle_increment) {
        s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
    }

    if (params->has_tls_creds) {
        g_free(s->parameters.tls_creds);
        s->parameters.tls_creds = g_strdup(params->tls_creds);
    }

    if (params->has_tls_hostname) {
        g_free(s->parameters.tls_hostname);
        s->parameters.tls_hostname = g_strdup(params->tls_hostname);
    }

    if (params->has_max_bandwidth) {
        s->parameters.max_bandwidth = params->max_bandwidth;
        if (s->to_dst_file) {
            qemu_file_set_rate_limit(s->to_dst_file,
                                s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
        }
    }

    if (params->has_downtime_limit) {
        s->parameters.downtime_limit = params->downtime_limit;
    }

    if (params->has_x_checkpoint_delay) {
        s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
        if (migration_in_colo_state()) {
            colo_checkpoint_notify(s);
        }
    }

    if (params->has_block_incremental) {
        s->parameters.block_incremental = params->block_incremental;
    }
}

void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp)
{
    if (!migrate_params_check(params, errp)) {
        /* Invalid parameter */
        return;
    }

    migrate_params_apply(params);
}


void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy_ram()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    atomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

void migrate_set_state(int *state, int old_state, int new_state)
{
    if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
        migrate_generate_event(new_state);
    }
}

void migrate_set_block_enabled(bool value, Error **errp)
{
    MigrationCapabilityStatusList *cap;

    cap = g_new0(MigrationCapabilityStatusList, 1);
    cap->value = g_new0(MigrationCapabilityStatus, 1);
    cap->value->capability = MIGRATION_CAPABILITY_BLOCK;
    cap->value->state = value;
    qmp_migrate_set_capabilities(cap, errp);
    qapi_free_MigrationCapabilityStatusList(cap);
}

static void migrate_set_block_incremental(MigrationState *s, bool value)
{
    s->parameters.block_incremental = value;
}

static void block_cleanup_parameters(MigrationState *s)
{
    if (s->must_remove_block_options) {
        /* setting to false can never fail */
        migrate_set_block_enabled(false, &error_abort);
        migrate_set_block_incremental(s, false);
        s->must_remove_block_options = false;
    }
}

static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    if (s->to_dst_file) {
        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        qemu_fclose(s->to_dst_file);
        s->to_dst_file = NULL;
    }

    assert((s->state != MIGRATION_STATUS_ACTIVE) &&
           (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    notifier_list_notify(&migration_state_notifiers, s);
    block_cleanup_parameters(s);
}

void migrate_fd_error(MigrationState *s, const Error *error)
{
    trace_migrate_fd_error(error_get_pretty(error));
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    if (!s->error) {
        s->error = error_copy(error);
    }
    notifier_list_notify(&migration_state_notifiers, s);
    block_cleanup_parameters(s);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;
    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* shutdown the rp socket, causing the rp thread to shut down */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

    do {
        old_state = s->state;
        if (!migration_is_setup_or_active(old_state)) {
            break;
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
    }
    block_cleanup_parameters(s);
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(void)
{
    MigrationState *s = migrate_get_current();

    return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

bool migration_in_postcopy_after_devices(MigrationState *s)
{
    return migration_in_postcopy() && s->postcopy_after_devices;
}

bool migration_is_idle(void)
{
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_COMPLETED:
    case MIGRATION_STATUS_FAILED:
        return true;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_COLO:
        return false;
    case MIGRATION_STATUS__MAX:
        g_assert_not_reached();
    }

    return false;
}

MigrationState *migrate_init(void)
{
    MigrationState *s = migrate_get_current();

    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->bytes_xfer = 0;
    s->xfer_limit = 0;
    s->cleanup_bh = 0;
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->setup_time = 0;
    s->start_postcopy = false;
    s->postcopy_after_devices = false;
    s->migration_thread_running = false;
    error_free(s->error);
    s->error = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    return s;
}

static GSList *migration_blockers;

int migrate_add_blocker(Error *reason, Error **errp)
{
    if (migrate_get_current()->only_migratable) {
        error_propagate(errp, error_copy(reason));
        error_prepend(errp, "disallowing migration blocker "
                          "(--only_migratable) for: ");
        return -EACCES;
    }

    if (migration_is_idle()) {
        migration_blockers = g_slist_prepend(migration_blockers, reason);
        return 0;
    }

    error_propagate(errp, error_copy(reason));
    error_prepend(errp, "disallowing migration blocker (migration in "
                      "progress) for: ");
    return -EBUSY;
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

bool migration_is_blocked(Error **errp)
{
    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (migration_blockers) {
        error_propagate(errp, error_copy(migration_blockers->data));
        return true;
    }

    return false;
}

void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    const char *p;

    if (migration_is_setup_or_active(s->state) ||
        s->state == MIGRATION_STATUS_CANCELLING ||
        s->state == MIGRATION_STATUS_COLO) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (migration_is_blocked(errp)) {
        return;
    }

    if ((has_blk && blk) || (has_inc && inc)) {
        if (migrate_use_block() || migrate_use_block_incremental()) {
            error_setg(errp, "Command options are incompatible with "
                       "current migration capabilities");
            return;
        }
        migrate_set_block_enabled(true, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        s->must_remove_block_options = true;
    }

    if (has_inc && inc) {
        migrate_set_block_incremental(s, true);
    }

    s = migrate_init();

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        return;
    }

    if (local_err) {
        migrate_fd_error(s, local_err);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size ");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationParameters p = {
        .has_max_bandwidth = true,
        .max_bandwidth = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
    if (value < 0 || value > MAX_MIGRATE_DOWNTIME_SECONDS) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d seconds",
                         MAX_MIGRATE_DOWNTIME_SECONDS);
        return;
    }

    value *= 1000; /* Convert to milliseconds */
    value = MAX(0, MIN(INT64_MAX, value));

    MigrationParameters p = {
        .has_downtime_limit = true,
        .downtime_limit = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}

bool migrate_release_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
}

bool migrate_postcopy_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
}

bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_level;
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_threads;
}

int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.decompress_threads;
}

bool migrate_use_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

bool migrate_use_block(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_BLOCK];
}

bool migrate_use_return_path(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_RETURN_PATH];
}

bool migrate_use_block_incremental(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.block_incremental;
}

/* migration thread support */
/*
 * Something bad happened to the RP stream, mark an error
 * The caller shall print or trace something to indicate why
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}

static struct rp_cmd_args {
    ssize_t     len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};
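
/*
 * In the table above a length of -1 marks a variable-length payload; for
 * the fixed-length messages the advertised header length is checked
 * against .len in source_return_path_thread() before the payload is read.
 */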

/*
 * Process a request for pages received on the return path,
 * We're allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
                                       ram_addr_t start, size_t len)
{
    long our_host_ps = getpagesize();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (start & (our_host_ps-1) ||
       (len & (our_host_ps-1))) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

/*
 * Handles messages sent on the return path towards the source VM
 *
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    uint8_t buf[512];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t  len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                    header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
            header_len != rp_cmd_args[header_type].len) ||
            header_len > sizeof(buf)) {
            error_report("RP: Received '%s' message (0x%04x) with "
                    "incorrect length %d expecting %zu",
                    rp_cmd_args[header_type].name, header_type, header_len,
                    (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = ldl_be_p(buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = ldl_be_p(buf);
            trace_source_return_path_thread_pong(tmp32);
            break;

        case MIG_RP_MSG_REQ_PAGES:
            start = ldq_be_p(buf);
            len = ldl_be_p(buf + 8);
            migrate_handle_rp_req_pages(ms, NULL, start, len);
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
            expected_len = 12 + 1; /* header + termination */

            if (header_len >= expected_len) {
                start = ldq_be_p(buf);
                len = ldl_be_p(buf + 8);
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_report("RP: Req_Page_id with length %d expecting %zd",
                        header_len, expected_len);
                mark_source_rp_bad(ms);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
            break;

        default:
            break;
        }
    }
    if (qemu_file_get_error(rp)) {
        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
out:
    ms->rp_state.from_dst_file = NULL;
    qemu_fclose(rp);
    return NULL;
}

static int open_return_path_on_source(MigrationState *ms)
{

    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
    if (!ms->rp_state.from_dst_file) {
        return -1;
    }

    trace_open_return_path_on_source();
    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);

    trace_open_return_path_on_source_continue();

    return 0;
}

/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    }
    trace_await_return_path_close_on_source_joining();
    qemu_thread_join(&ms->rp_state.rp_thread);
    trace_await_return_path_close_on_source_close();
    return ms->rp_state.error;
}

/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
static int postcopy_start(MigrationState *ms, bool *old_vm_running)
{
    int ret;
    QIOChannelBuffer *bioc;
    QEMUFile *fb;
    int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    bool restart_block = false;
    migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    trace_postcopy_start();
    qemu_mutex_lock_iothread();
    trace_postcopy_start_set_run();

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
    *old_vm_running = runstate_is_running();
    global_state_store();
    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_inactivate_all();
    if (ret < 0) {
        goto fail;
    }
    restart_block = true;

    /*
     * Cause any non-postcopiable, but iterative devices to
     * send out their final data.
     */
    qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);

    /*
     * In 'finish migrate' state, with the io-lock held, everything should
     * be quiet; but we may still have dirty pages, and we need to tell the
     * destination to throw away any pages it has already received that are
     * now dirty.
     */
    if (ram_postcopy_send_discard_bitmap(ms)) {
        error_report("postcopy send discard bitmap failed");
        goto fail;
    }

    /*
     * Send the rest of the state - note that devices doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here
     */
    qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
    /* Ping just for debugging, helps line traces up */
    qemu_savevm_send_ping(ms->to_dst_file, 2);

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it.  Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each device's load-state code (especially the
     * open-coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use a qemu_buf to hold the whole of the device state.
     */
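    /* 4096 is only the initial allocation; the buffer channel is expected to
     * grow as the packaged device state is written into it. */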
    bioc = qio_channel_buffer_new(4096);
    qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
    fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc));
    object_unref(OBJECT(bioc));

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb, false, false);
    qemu_savevm_send_ping(fb, 3);

    qemu_savevm_send_postcopy_run(fb);

    /* <><> end of stuff going into the package */

    /* Last point of recovery; as soon as we send the package the destination
     * can open devices and potentially start running.
     * Let's just check again that we've not got any errors.
     */
    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored (pre package)");
        goto fail_closefb;
    }

    restart_block = false;

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
        goto fail_closefb;
    }
    qemu_fclose(fb);

    /* Send a notification to give a chance for anything that needs to happen
     * at the transition to postcopy and after the device state; in particular
     * SPICE needs to trigger a transition now
     */
    ms->postcopy_after_devices = true;
    notifier_list_notify(&migration_state_notifiers, ms);

    ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;

    qemu_mutex_unlock_iothread();

    /*
     * Although this ping is just for debug, it could potentially be
     * used for getting a better measurement of downtime at the source.
     */
    qemu_savevm_send_ping(ms->to_dst_file, 4);

    if (migrate_release_ram()) {
        ram_postcopy_migrated_memory_release(ms);
    }

    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored");
        migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                              MIGRATION_STATUS_FAILED);
    }

    return ret;

fail_closefb:
    qemu_fclose(fb);
fail:
    migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                          MIGRATION_STATUS_FAILED);
    if (restart_block) {
        /* A failure happened early enough that we know the destination hasn't
         * accessed block devices, so we're safe to recover.
         */
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
    qemu_mutex_unlock_iothread();
    return -1;
}

/**
 * migration_completion: Used by migration_thread when there's not much left.
 *   The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 * @current_active_state: The migration state we expect to be in
 * @*old_vm_running: Pointer to old_vm_running flag
 * @*start_time: Pointer to time to update
 */
static void migration_completion(MigrationState *s, int current_active_state,
                                 bool *old_vm_running,
                                 int64_t *start_time)
{
    int ret;

    if (s->state == MIGRATION_STATUS_ACTIVE) {
        qemu_mutex_lock_iothread();
        *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
        *old_vm_running = runstate_is_running();
        ret = global_state_store();

        if (!ret) {
            bool inactivate = !migrate_colo_enabled();
            ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
            if (ret >= 0) {
                qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
                ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
                                                         inactivate);
            }
            if (inactivate && ret >= 0) {
                s->block_inactive = true;
            }
        }
        qemu_mutex_unlock_iothread();

        if (ret < 0) {
            goto fail;
        }
    } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        trace_migration_completion_postcopy_end();

        qemu_savevm_state_complete_postcopy(s->to_dst_file);
        trace_migration_completion_postcopy_end_after_complete();
    }

    /*
     * If the return path was opened we must clean up the thread before
     * cleaning everything else up (since if there are no failures
     * it will wait for the destination to send its status in
     * a SHUT command).
     */
    if (s->rp_state.from_dst_file) {
        int rp_error;
        trace_migration_return_path_end_before();
        rp_error = await_return_path_close_on_source(s);
        trace_migration_return_path_end_after(rp_error);
        if (rp_error) {
            goto fail_invalidate;
        }
    }
    }

    if (qemu_file_get_error(s->to_dst_file)) {
        trace_migration_completion_file_err();
        goto fail_invalidate;
    }

    if (!migrate_colo_enabled()) {
        migrate_set_state(&s->state, current_active_state,
                          MIGRATION_STATUS_COMPLETED);
    }

    return;

fail_invalidate:
    /* If not doing postcopy, vm_start() will be called: let's regain
     * control of the images.
     */
    if (s->state == MIGRATION_STATUS_ACTIVE) {
        Error *local_err = NULL;

        qemu_mutex_lock_iothread();
        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
        qemu_mutex_unlock_iothread();
    }

fail:
    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_FAILED);
}

bool migrate_colo_enabled(void)
{
    MigrationState *s = migrate_get_current();
    return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO];
}

/*
 * Master migration thread on the source VM.
 * It drives the migration and pumps the data down the outgoing channel.
 */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    /* Used by the bandwidth calcs, updated later */
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    /*
     * The final stage happens when the remaining data is smaller than
     * this threshold; it's calculated from the requested downtime and
     * measured bandwidth
     */
    int64_t threshold_size = 0;
    int64_t start_time = initial_time;
    int64_t end_time;
    bool old_vm_running = false;
    bool entered_postcopy = false;
    /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
    enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
    bool enable_colo = migrate_colo_enabled();

    rcu_register_thread();

    qemu_savevm_state_header(s->to_dst_file);

    /*
     * If we opened the return path, we need to make sure dst has it
     * opened as well.
     */
    if (s->rp_state.from_dst_file) {
        /* Now tell the dest that it should open its end so it can reply */
        qemu_savevm_send_open_return_path(s->to_dst_file);

        /* And do a ping that will make stuff easier to debug */
        qemu_savevm_send_ping(s->to_dst_file, 1);
    }

    if (migrate_postcopy_ram()) {
        /*
         * Tell the destination that we *might* want to do postcopy later;
         * if the other end can't do postcopy it should fail now, nice and
         * early.
         */
        qemu_savevm_send_postcopy_advise(s->to_dst_file);
    }

    qemu_savevm_state_setup(s->to_dst_file);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_ACTIVE);

    trace_migration_thread_setup_complete();

    while (s->state == MIGRATION_STATUS_ACTIVE ||
           s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->to_dst_file)) {
            uint64_t pend_post, pend_nonpost;

            qemu_savevm_state_pending(s->to_dst_file, threshold_size,
                                      &pend_nonpost, &pend_post);
            pending_size = pend_nonpost + pend_post;
            trace_migrate_pending(pending_size, threshold_size,
                                  pend_post, pend_nonpost);
            if (pending_size && pending_size >= threshold_size) {
                /* Still a significant amount to transfer */

                if (migrate_postcopy_ram() &&
                    s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
1894
                    pend_nonpost <= threshold_size &&
1895 1896 1897 1898 1899 1900 1901 1902 1903 1904
                    atomic_read(&s->start_postcopy)) {

                    if (!postcopy_start(s, &old_vm_running)) {
                        current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
                        entered_postcopy = true;
                    }

                    continue;
                }
                /* Just another iteration step */
                qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
            } else {
                trace_migration_thread_low_pending(pending_size);
                migration_completion(s, current_active_state,
                                     &old_vm_running, &start_time);
                break;
            }
        }

        if (qemu_file_get_error(s->to_dst_file)) {
            migrate_set_state(&s->state, current_active_state,
                              MIGRATION_STATUS_FAILED);
            trace_migration_thread_file_err();
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
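            /*
             * Once per BUFFER_DELAY interval: recompute the observed
             * bandwidth (bytes per millisecond) and, from that and the
             * allowed downtime, the threshold_size of remaining data
             * below which we can complete within the downtime limit.
             */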
            uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
                                         initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = (double)transferred_bytes / time_spent;
            threshold_size = bandwidth * s->parameters.downtime_limit;

            s->mbps = (((double) transferred_bytes * 8.0) /
                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, threshold_size);
            /* If we haven't sent anything, we don't want to recalculate;
               10000 is a small enough number for our purposes */
            if (ram_counters.dirty_pages_rate && transferred_bytes > 10000) {
                s->expected_downtime = ram_counters.dirty_pages_rate *
                    qemu_target_page_size() / bandwidth;
            }

            qemu_file_reset_rate_limit(s->to_dst_file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->to_dst_file);
        }
        if (qemu_file_rate_limit(s->to_dst_file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
        }
    }

    trace_migration_thread_after_loop();
    /* If we enabled cpu throttling for auto-converge, turn it off. */
    cpu_throttle_stop();
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    qemu_mutex_lock_iothread();
    /*
     * The resources allocated by migration will be reused in the COLO
     * process, so don't release them.
     */
    if (!enable_colo) {
        qemu_savevm_state_cleanup();
    }
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
        s->total_time = end_time - s->total_time;
        if (!entered_postcopy) {
            s->downtime = end_time - start_time;
        }
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        if (s->state == MIGRATION_STATUS_ACTIVE && enable_colo) {
            migrate_start_colo_process(s);
            qemu_savevm_state_cleanup();
            /*
            * FIXME: we will run the VM in COLO regardless of its old running
            * state. After exiting COLO, we will keep running.
            */
            old_vm_running = true;
        }
        if (old_vm_running && !entered_postcopy) {
            vm_start();
        } else {
            if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
                runstate_set(RUN_STATE_POSTMIGRATE);
            }
        }
    }
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    rcu_unregister_thread();
    return NULL;
}

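/*
 * Called once the outgoing channel is ready: set up rate limiting and the
 * cleanup bottom half, optionally open the return path, and spawn the
 * migration_thread that drives the migration.
 */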
void migrate_fd_connect(MigrationState *s)
{
    s->expected_downtime = s->parameters.downtime_limit;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_blocking(s->to_dst_file, true);
    qemu_file_set_rate_limit(s->to_dst_file,
                             s->parameters.max_bandwidth / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    /*
     * Open the return path. For postcopy, it is used exclusively. For
     * precopy, QEMU uses the return path only if the user specified the
     * "return-path" capability.
     */
    if (migrate_postcopy_ram() || migrate_use_return_path()) {
        if (open_return_path_on_source(s)) {
            error_report("Unable to open return-path for postcopy");
            migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                              MIGRATION_STATUS_FAILED);
            migrate_fd_cleanup(s);
            return;
        }
    }

    qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
    s->migration_thread_running = true;
}

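/* Print the global migration properties to the monitor */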
void migration_global_dump(Monitor *mon)
{
    MigrationState *ms = migrate_get_current();

    monitor_printf(mon, "globals: store-global-state=%d, only_migratable=%d, "
                   "send-configuration=%d, send-section-footer=%d\n",
                   ms->store_global_state, ms->only_migratable,
                   ms->send_configuration, ms->send_section_footer);
}

#define DEFINE_PROP_MIG_CAP(name, x)             \
    DEFINE_PROP_BOOL(name, MigrationState, enabled_capabilities[x], false)

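/* qdev properties of the migration object; the "x-" prefixed names are
 * experimental/unstable. */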
static Property migration_properties[] = {
    DEFINE_PROP_BOOL("store-global-state", MigrationState,
                     store_global_state, true),
    DEFINE_PROP_BOOL("only-migratable", MigrationState, only_migratable, false),
    DEFINE_PROP_BOOL("send-configuration", MigrationState,
                     send_configuration, true),
    DEFINE_PROP_BOOL("send-section-footer", MigrationState,
                     send_section_footer, true),

    /* Migration parameters */
    DEFINE_PROP_INT64("x-compress-level", MigrationState,
                      parameters.compress_level,
                      DEFAULT_MIGRATE_COMPRESS_LEVEL),
    DEFINE_PROP_INT64("x-compress-threads", MigrationState,
                      parameters.compress_threads,
                      DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT),
    DEFINE_PROP_INT64("x-decompress-threads", MigrationState,
                      parameters.decompress_threads,
                      DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
    DEFINE_PROP_INT64("x-cpu-throttle-initial", MigrationState,
                      parameters.cpu_throttle_initial,
                      DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL),
    DEFINE_PROP_INT64("x-cpu-throttle-increment", MigrationState,
                      parameters.cpu_throttle_increment,
                      DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT),
    DEFINE_PROP_INT64("x-max-bandwidth", MigrationState,
                      parameters.max_bandwidth, MAX_THROTTLE),
    DEFINE_PROP_INT64("x-downtime-limit", MigrationState,
                      parameters.downtime_limit,
                      DEFAULT_MIGRATE_SET_DOWNTIME),
    DEFINE_PROP_INT64("x-checkpoint-delay", MigrationState,
                      parameters.x_checkpoint_delay,
                      DEFAULT_MIGRATE_X_CHECKPOINT_DELAY),

    /* Migration capabilities */
    DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE),
    DEFINE_PROP_MIG_CAP("x-rdma-pin-all", MIGRATION_CAPABILITY_RDMA_PIN_ALL),
    DEFINE_PROP_MIG_CAP("x-auto-converge", MIGRATION_CAPABILITY_AUTO_CONVERGE),
    DEFINE_PROP_MIG_CAP("x-zero-blocks", MIGRATION_CAPABILITY_ZERO_BLOCKS),
    DEFINE_PROP_MIG_CAP("x-compress", MIGRATION_CAPABILITY_COMPRESS),
    DEFINE_PROP_MIG_CAP("x-events", MIGRATION_CAPABILITY_EVENTS),
    DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM),
    DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO),
    DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM),
    DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK),
    DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH),

    DEFINE_PROP_END_OF_LIST(),
};

static void migration_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->user_creatable = false;
    dc->props = migration_properties;
}

static void migration_instance_init(Object *obj)
{
    MigrationState *ms = MIGRATION_OBJ(obj);

    ms->state = MIGRATION_STATUS_NONE;
    ms->xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE;
    ms->mbps = -1;
    ms->parameters.tls_creds = g_strdup("");
    ms->parameters.tls_hostname = g_strdup("");
}

static const TypeInfo migration_type = {
    .name = TYPE_MIGRATION,
    /*
     * NOTE: TYPE_MIGRATION is not really a device, as the object is
     * not created using qdev_create(), it is not attached to the qdev
     * device tree, and it is never realized.
     *
     * TODO: Make this TYPE_OBJECT once QOM provides something like
     * TYPE_DEVICE's "-global" properties.
     */
    .parent = TYPE_DEVICE,
    .class_init = migration_class_init,
    .class_size = sizeof(MigrationClass),
    .instance_size = sizeof(MigrationState),
    .instance_init = migration_instance_init,
};

static void register_migration_types(void)
{
    type_register_static(&migration_type);
}

type_init(register_migration_types);