/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qemu/sockets.h"
#include "qemu/rcu.h"
#include "migration/block.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
#include "qapi/util.h"
#include "qapi-event.h"

#define MAX_THROTTLE  (32 << 20)      /* Migration speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression. */
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ..., 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    static MigrationState current_migration = {
        .state = MIGRATION_STATUS_NONE,
        .bandwidth_limit = MAX_THROTTLE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .mbps = -1,
        .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] =
                DEFAULT_MIGRATE_COMPRESS_LEVEL,
        .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
                DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
        .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
    };

    return &current_migration;
}

/* For incoming */
static MigrationIncomingState *mis_current;

MigrationIncomingState *migration_incoming_get_current(void)
{
    return mis_current;
}

MigrationIncomingState *migration_incoming_state_new(QEMUFile* f)
{
    mis_current = g_malloc0(sizeof(MigrationIncomingState));
    mis_current->file = f;
    QLIST_INIT(&mis_current->loadvm_handlers);

    return mis_current;
}

void migration_incoming_state_destroy(void)
{
    loadvm_free_handlers(mis_current);
    g_free(mis_current);
    mis_current = NULL;
}


typedef struct {
    bool optional;
    uint32_t size;
    uint8_t runstate[100];
    RunState state;
    bool received;
} GlobalState;

static GlobalState global_state;

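/* Copy the name of the current run state into the global_state section so
 * that it can be transmitted to the destination at the end of migration. */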
int global_state_store(void)
{
    if (!runstate_store((char *)global_state.runstate,
                        sizeof(global_state.runstate))) {
        error_report("runstate name too big: %s", global_state.runstate);
        trace_migrate_state_too_big();
        return -EINVAL;
    }
    return 0;
}

void global_state_store_running(void)
{
    const char *state = RunState_lookup[RUN_STATE_RUNNING];
    strncpy((char *)global_state.runstate,
           state, sizeof(global_state.runstate));
}

static bool global_state_received(void)
{
    return global_state.received;
}

static RunState global_state_get_runstate(void)
{
    return global_state.state;
}

void global_state_set_optional(void)
{
    global_state.optional = true;
}

static bool global_state_needed(void *opaque)
{
    GlobalState *s = opaque;
    char *runstate = (char *)s->runstate;

    /* If it is not optional, it is mandatory */

    if (s->optional == false) {
        return true;
    }

    /* If state is running or paused, it is not needed */

    if (strcmp(runstate, "running") == 0 ||
        strcmp(runstate, "paused") == 0) {
        return false;
    }

    /* for any other state it is needed */
    return true;
}

static int global_state_post_load(void *opaque, int version_id)
{
    GlobalState *s = opaque;
    Error *local_err = NULL;
    int r;
    char *runstate = (char *)s->runstate;

    s->received = true;
    trace_migrate_global_state_post_load(runstate);

    r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE_MAX,
                                -1, &local_err);

    if (r == -1) {
        if (local_err) {
            error_report_err(local_err);
        }
        return -EINVAL;
    }
    s->state = r;

    return 0;
}

static void global_state_pre_save(void *opaque)
{
    GlobalState *s = opaque;

    trace_migrate_global_state_pre_save((char *)s->runstate);
    s->size = strlen((char *)s->runstate) + 1;
}

static const VMStateDescription vmstate_globalstate = {
    .name = "globalstate",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = global_state_post_load,
    .pre_save = global_state_pre_save,
    .needed = global_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(size, GlobalState),
        VMSTATE_BUFFER(runstate, GlobalState),
        VMSTATE_END_OF_LIST()
    },
};

void register_global_state(void)
{
    /* We use this buffer whether or not we receive the section */
    strcpy((char *)&global_state.runstate, "");
    global_state.received = false;
    vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state, &error_abort);
    }
}

/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}

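/* Start listening for an incoming migration according to the URI scheme.
 * "defer" only records that the real URI will be supplied later through
 * the 'migrate-incoming' QMP command. */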
void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
#endif
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

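/*
 * Destination side of migration: this coroutine reads the complete device
 * and RAM state from the incoming QEMUFile, then starts or pauses the guest
 * according to autostart and the run state sent by the source.
 */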
static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    Error *local_err = NULL;
    int ret;

    migration_incoming_state_new(f);
    migrate_generate_event(MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(f);

    qemu_fclose(f);
    free_xbzrle_decoded_buf();
    migration_incoming_state_destroy();

    if (ret < 0) {
        migrate_generate_event(MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }
    migrate_generate_event(MIGRATION_STATUS_COMPLETED);
    qemu_announce_self();

    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        error_report_err(local_err);
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    /* If global state section was not received or we are in running
       state, we need to obey autostart. Any other state is set with
       runstate_set. */

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else {
        runstate_set(global_state_get_runstate());
    }
    migrate_decompress_threads_join();
}

void process_incoming_migration(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
    int fd = qemu_get_fd(f);

    assert(fd != -1);
    migrate_decompress_threads_create();
    qemu_set_nonblock(fd);
    qemu_coroutine_enter(co, f);
}

/* Amount of time, in nanoseconds, that we are willing to have the guest
 * paused at the end of migration (the downtime). Nanoseconds are used
 * because that is the maximum resolution that get_clock() can achieve; it
 * is an internal measure. All user-visible units must be in seconds. */
static uint64_t max_downtime = 300000000;

uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    params->compress_threads =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    params->decompress_threads =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];

    return params;
}

static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}

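/* Build the MigrationInfo reply for the 'query-migrate' QMP command,
 * filling in only the fields that are meaningful in the current state. */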
MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = 0;
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;

    if (s->state == MIGRATION_STATUS_ACTIVE ||
        s->state == MIGRATION_STATUS_SETUP) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }
}

void qmp_migrate_set_parameters(bool has_compress_level,
                                int64_t compress_level,
                                bool has_compress_threads,
                                int64_t compress_threads,
                                bool has_decompress_threads,
                                int64_t decompress_threads, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (has_compress_level && (compress_level < 0 || compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (has_compress_threads &&
            (compress_threads < 1 || compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_decompress_threads &&
            (decompress_threads < 1 || decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }

    if (has_compress_level) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    }
    if (has_compress_threads) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads;
    }
    if (has_decompress_threads) {
        s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                                                    decompress_threads;
    }
}

/* shared migration helpers */

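/* Atomically switch from old_state to new_state; the transition (and the
 * corresponding MIGRATION QMP event) only happens if the current state
 * still equals old_state. */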
static void migrate_set_state(MigrationState *s, int old_state, int new_state)
{
    if (atomic_cmpxchg(&s->state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
        migrate_generate_event(new_state);
    }
}

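/* Bottom half run in the main loop once the migration thread has finished
 * (or never started): join the thread, close the migration file and notify
 * the state change notifiers. */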
static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    if (s->file) {
        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        qemu_thread_join(&s->thread);
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->file);
        s->file = NULL;
    }

    assert(s->state != MIGRATION_STATUS_ACTIVE);

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        qemu_savevm_state_cancel();
        if (s->state == MIGRATION_STATUS_CANCELLING) {
            migrate_set_state(s, MIGRATION_STATUS_CANCELLING,
                              MIGRATION_STATUS_CANCELLED);
        }
    }

    notifier_list_notify(&migration_state_notifiers, s);
}

void migrate_fd_error(MigrationState *s)
{
    trace_migrate_fd_error();
    assert(s->file == NULL);
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED);
    notifier_list_notify(&migration_state_notifiers, s);
}

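/* Ask for an outgoing migration to be cancelled; the migration thread
 * observes the CANCELLING state and migrate_fd_cleanup() completes the
 * transition to CANCELLED. */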
static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->file;
    trace_migrate_fd_cancel();

    do {
        old_state = s->state;
        if (old_state != MIGRATION_STATUS_SETUP &&
            old_state != MIGRATION_STATUS_ACTIVE) {
            break;
        }
        migrate_set_state(s, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
P
617 618
}

S
bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}
I
636
{
637
    MigrationState *s = migrate_get_current();
638
    int64_t bandwidth_limit = s->bandwidth_limit;
O
Orit Wasserman 已提交
639
    bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
640
    int64_t xbzrle_cache_size = s->xbzrle_cache_size;
641 642 643 644 645
    int compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    int compress_thread_count =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    int decompress_thread_count =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
O
Orit Wasserman 已提交
646 647 648

    memcpy(enabled_capabilities, s->enabled_capabilities,
           sizeof(enabled_capabilities));
649

650
    memset(s, 0, sizeof(*s));
I
Isaku Yamahata 已提交
651
    s->params = *params;
O
Orit Wasserman 已提交
652 653
    memcpy(s->enabled_capabilities, enabled_capabilities,
           sizeof(enabled_capabilities));
654
    s->xbzrle_cache_size = xbzrle_cache_size;
655

656 657 658 659 660
    s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
               compress_thread_count;
    s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
               decompress_thread_count;
661
    s->bandwidth_limit = bandwidth_limit;
662
    migrate_set_state(s, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
663

664
    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
665 666
    return s;
}

static GSList *migration_blockers;

void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

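/* QMP 'migrate' command: check that no other migration is in progress and
 * that nothing blocks migration, then dispatch on the URI scheme to start
 * the outgoing connection. */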
void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    MigrationParams params;
    const char *p;

    params.blk = has_blk && blk;
    params.shared = has_inc && inc;

    if (s->state == MIGRATION_STATUS_ACTIVE ||
        s->state == MIGRATION_STATUS_SETUP ||
        s->state == MIGRATION_STATUS_CANCELLING) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (qemu_savevm_state_blocked(errp)) {
        return;
    }

    if (migration_blockers) {
        *errp = error_copy(migration_blockers->data);
        return;
    }

    /* We are starting a new migration, so we want to start in a clean
       state.  This change is only needed if previous migration
       failed/was cancelled.  We don't use migrate_set_state() because
       we are setting the initial state, not changing it. */
    s->state = MIGRATION_STATUS_NONE;

    s = migrate_init(&params);

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
#endif
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED);
        return;
    }

    if (local_err) {
        migrate_fd_error(s);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

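/* Resize the XBZRLE page cache, validating the requested size against the
 * host address space, the guest RAM size and the page size. */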
void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size ");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationState *s;

    if (value < 0) {
        value = 0;
    }
    if (value > SIZE_MAX) {
        value = SIZE_MAX;
    }

    s = migrate_get_current();
    s->bandwidth_limit = value;
    if (s->file) {
        qemu_file_set_rate_limit(s->file, s->bandwidth_limit / XFER_LIMIT_RATIO);
    }
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
    value *= 1e9;
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;
}

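/* Accessors exposing the currently enabled capabilities and parameters to
 * the rest of the migration code. */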
bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
}

int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
}

bool migrate_use_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

/* migration thread support */

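/*
 * Outgoing migration thread: iterate the savevm state while the estimated
 * pending data exceeds the threshold derived from the measured bandwidth
 * and max_downtime, then stop the guest, send the final state and update
 * the transfer statistics.
 */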
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    int64_t max_size = 0;
    int64_t start_time = initial_time;
    bool old_vm_running = false;

    rcu_register_thread();

    qemu_savevm_state_header(s->file);
    qemu_savevm_state_begin(s->file, &s->params);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE);

    while (s->state == MIGRATION_STATUS_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->file)) {
            pending_size = qemu_savevm_state_pending(s->file, max_size);
            trace_migrate_pending(pending_size, max_size);
            if (pending_size && pending_size >= max_size) {
                qemu_savevm_state_iterate(s->file);
            } else {
                int ret;

                qemu_mutex_lock_iothread();
                start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
                qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
                old_vm_running = runstate_is_running();

                ret = global_state_store();
                if (!ret) {
                    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
                    if (ret >= 0) {
                        qemu_file_set_rate_limit(s->file, INT64_MAX);
                        qemu_savevm_state_complete(s->file);
                    }
                }
                qemu_mutex_unlock_iothread();

                if (ret < 0) {
                    migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                                      MIGRATION_STATUS_FAILED);
                    break;
                }

                if (!qemu_file_get_error(s->file)) {
                    migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                                      MIGRATION_STATUS_COMPLETED);
                    break;
                }
            }
        }

        if (qemu_file_get_error(s->file)) {
            migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                              MIGRATION_STATUS_FAILED);
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = transferred_bytes / time_spent;
            max_size = bandwidth * migrate_max_downtime() / 1000000;

            s->mbps = time_spent ? (((double) transferred_bytes * 8.0) /
                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, max_size);
            /* If we haven't sent anything, we don't want to recalculate;
               10000 is a small enough number for our purposes */
            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
            }

            qemu_file_reset_rate_limit(s->file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->file);
        }
        if (qemu_file_rate_limit(s->file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
        }
    }

    qemu_mutex_lock_iothread();
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        uint64_t transferred_bytes = qemu_ftell(s->file);
        s->total_time = end_time - s->total_time;
        s->downtime = end_time - start_time;
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        if (old_vm_running) {
            vm_start();
        }
    }
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    rcu_unregister_thread();
    return NULL;
}

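/* Called once the outgoing transport has been set up: arm the cleanup
 * bottom half, apply the bandwidth limit and spawn the migration thread. */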
void migrate_fd_connect(MigrationState *s)
{
    /* A first rough approximation of the expected downtime:
     * max_downtime converted from ns to ms */
    s->expected_downtime = max_downtime/1000000;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_rate_limit(s->file,
                             s->bandwidth_limit / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    migrate_compress_threads_create();
    qemu_thread_create(&s->thread, "migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
}