/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qapi/util.h"
#include "qemu/sockets.h"
#include "qemu/rcu.h"
#include "migration/block.h"
#include "migration/postcopy-ram.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
#include "qapi-event.h"
#include "qom/cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

#define MAX_THROTTLE  (32 << 20)      /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression. */
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ... 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Define default autoconverge cpu throttle migration parameters */
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;

/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need dynamic
   creation of migration state. */

/* For outgoing */
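/*
 * Return the single global MigrationState used for outgoing migration;
 * its page-request mutex is initialised lazily on first call.
 */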
MigrationState *migrate_get_current(void)
{
    static bool once;
    static MigrationState current_migration = {
        .state = MIGRATION_STATUS_NONE,
        .bandwidth_limit = MAX_THROTTLE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .mbps = -1,
        .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] =
                DEFAULT_MIGRATE_COMPRESS_LEVEL,
        .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
                DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
        .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
        .parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL] =
                DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL,
        .parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT] =
                DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT,
    };

    if (!once) {
        qemu_mutex_init(&current_migration.src_page_req_mutex);
        once = true;
    }
    return &current_migration;
}

/* For incoming */
static MigrationIncomingState *mis_current;

MigrationIncomingState *migration_incoming_get_current(void)
{
    return mis_current;
}

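/* Allocate and initialise the incoming-side state for the stream 'f' */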
MigrationIncomingState *migration_incoming_state_new(QEMUFile* f)
{
    mis_current = g_new0(MigrationIncomingState, 1);
    mis_current->from_src_file = f;
    mis_current->state = MIGRATION_STATUS_NONE;
    QLIST_INIT(&mis_current->loadvm_handlers);
    qemu_mutex_init(&mis_current->rp_mutex);
    qemu_event_init(&mis_current->main_thread_load_event, false);

    return mis_current;
}

void migration_incoming_state_destroy(void)
{
    qemu_event_destroy(&mis_current->main_thread_load_event);
    loadvm_free_handlers(mis_current);
    g_free(mis_current);
    mis_current = NULL;
}


typedef struct {
    bool optional;
    uint32_t size;
    uint8_t runstate[100];
    RunState state;
    bool received;
} GlobalState;

static GlobalState global_state;

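/*
 * Capture the current runstate name into global_state so it can be sent
 * in the "globalstate" section; fails if the name does not fit.
 */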
int global_state_store(void)
{
    if (!runstate_store((char *)global_state.runstate,
                        sizeof(global_state.runstate))) {
        error_report("runstate name too big: %s", global_state.runstate);
        trace_migrate_state_too_big();
        return -EINVAL;
    }
    return 0;
}

void global_state_store_running(void)
{
    const char *state = RunState_lookup[RUN_STATE_RUNNING];
    strncpy((char *)global_state.runstate,
           state, sizeof(global_state.runstate));
}

static bool global_state_received(void)
{
    return global_state.received;
}

static RunState global_state_get_runstate(void)
{
    return global_state.state;
}

void global_state_set_optional(void)
{
    global_state.optional = true;
}

static bool global_state_needed(void *opaque)
{
    GlobalState *s = opaque;
    char *runstate = (char *)s->runstate;

    /* If it is not optional, it is mandatory */

    if (s->optional == false) {
        return true;
    }

    /* If state is running or paused, it is not needed */

    if (strcmp(runstate, "running") == 0 ||
        strcmp(runstate, "paused") == 0) {
        return false;
    }

    /* for any other state it is needed */
    return true;
}

static int global_state_post_load(void *opaque, int version_id)
{
    GlobalState *s = opaque;
    Error *local_err = NULL;
    int r;
    char *runstate = (char *)s->runstate;

    s->received = true;
    trace_migrate_global_state_post_load(runstate);

    r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE__MAX,
                                -1, &local_err);

    if (r == -1) {
        if (local_err) {
            error_report_err(local_err);
        }
        return -EINVAL;
    }
    s->state = r;

    return 0;
}

static void global_state_pre_save(void *opaque)
{
    GlobalState *s = opaque;

    trace_migrate_global_state_pre_save((char *)s->runstate);
    s->size = strlen((char *)s->runstate) + 1;
}

static const VMStateDescription vmstate_globalstate = {
    .name = "globalstate",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = global_state_post_load,
    .pre_save = global_state_pre_save,
    .needed = global_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(size, GlobalState),
        VMSTATE_BUFFER(runstate, GlobalState),
        VMSTATE_END_OF_LIST()
    },
};

void register_global_state(void)
{
    /* We use this regardless of whether it was received from the source */
    strcpy((char *)&global_state.runstate, "");
    global_state.received = false;
    vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
}

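/* Emit a QMP MIGRATION event, but only if the "events" capability is set */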
static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state, &error_abort);
    }
}

/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}

/* Request a range of pages from the source VM at the given
 * start address.
 *   rbname: Name of the RAMBlock to request the page in, if NULL it's the same
 *           as the last request (a name must have been given previously)
 *   Start: Address offset within the RB
 *   Len: Length in bytes required - must be a multiple of pagesize
 */
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                               ram_addr_t start, size_t len)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    if (rbname) {
        int rbname_len = strlen(rbname);
        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
    } else {
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
    }
}

void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
#endif
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report_err(local_err);
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self();

    /* If global state section was not received or we are in running
       state, we need to obey autostart. Any other state is set with
       runstate_set. */

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else {
        runstate_set(global_state_get_runstate());
    }
    migrate_decompress_threads_join();
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

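/*
 * Coroutine that drives the destination side: it loads the incoming
 * stream and then either finishes up via a bottom half or, for postcopy,
 * leaves cleanup to the postcopy listen thread.
 */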
static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    MigrationIncomingState *mis;
    PostcopyState ps;
    int ret;

    mis = migration_incoming_state_new(f);
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(f);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    qemu_fclose(f);
    free_xbzrle_decoded_buf();

    if (ret < 0) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
}

void process_incoming_migration(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);

    migrate_decompress_threads_create();
    qemu_file_set_blocking(f, false);
    qemu_coroutine_enter(co, f);
}


void migration_set_incoming_channel(MigrationState *s,
                                    QIOChannel *ioc)
{
    QEMUFile *f = qemu_fopen_channel_input(ioc);

    process_incoming_migration(f);
}


void migration_set_outgoing_channel(MigrationState *s,
                                    QIOChannel *ioc)
{
    QEMUFile *f = qemu_fopen_channel_output(ioc);

    s->to_dst_file = f;

    migrate_fd_connect(s);
}


/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
void migrate_send_rp_message(MigrationIncomingState *mis,
                             enum mig_rp_message_type message_type,
                             uint16_t len, void *data)
{
    trace_migrate_send_rp_message((int)message_type, len);
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);
    qemu_mutex_unlock(&mis->rp_mutex);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

/* Maximum guest downtime, in nanoseconds, that we are willing to accept
 * during migration.  Nanoseconds are used because that is the maximum
 * resolution get_clock() can achieve.  This is an internal measure; all
 * user-visible units must be in seconds. */
static uint64_t max_downtime = 300000000;

uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    params->compress_threads =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    params->decompress_threads =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
    params->cpu_throttle_initial =
            s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL];
    params->cpu_throttle_increment =
            s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT];

    return params;
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
static bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_SETUP:
        return true;

    default:
        return false;

    }
}

static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        if (cpu_throttle_active()) {
            info->has_cpu_throttle_percentage = true;
            info->cpu_throttle_percentage = cpu_throttle_get_percentage();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
        /* Mostly the same as active; TODO add some postcopy stats */
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = 0;
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;

    if (migration_is_setup_or_active(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }

    if (migrate_postcopy_ram()) {
        if (migrate_use_compression()) {
            /* The decompression threads asynchronously write into RAM
             * rather than use the atomic copies needed to avoid
             * userfaulting.  It should be possible to fix the decompression
             * threads for compatibility in future.
             */
            error_report("Postcopy is not currently compatible with "
                         "compression");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
    }
}

void qmp_migrate_set_parameters(bool has_compress_level,
                                int64_t compress_level,
                                bool has_compress_threads,
                                int64_t compress_threads,
                                bool has_decompress_threads,
                                int64_t decompress_threads,
                                bool has_cpu_throttle_initial,
                                int64_t cpu_throttle_initial,
                                bool has_cpu_throttle_increment,
                                int64_t cpu_throttle_increment, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (has_compress_level && (compress_level < 0 || compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (has_compress_threads &&
            (compress_threads < 1 || compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_decompress_threads &&
            (decompress_threads < 1 || decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_cpu_throttle_initial &&
            (cpu_throttle_initial < 1 || cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (has_cpu_throttle_increment &&
            (cpu_throttle_increment < 1 || cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
        return;
    }

    if (has_compress_level) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    }
    if (has_compress_threads) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads;
    }
    if (has_decompress_threads) {
        s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                                                    decompress_threads;
    }
    if (has_cpu_throttle_initial) {
        s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL] =
                                                    cpu_throttle_initial;
    }

    if (has_cpu_throttle_increment) {
        s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT] =
                                                    cpu_throttle_increment;
    }
}

void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy_ram()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    atomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

void migrate_set_state(int *state, int old_state, int new_state)
{
    if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
        migrate_generate_event(new_state);
    }
}

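/*
 * Bottom half run on the source once migration stops: joins the
 * migration and compression threads, closes the outgoing file and
 * notifies state-change listeners.
 */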
static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    flush_page_queue(s);

    if (s->to_dst_file) {
        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->to_dst_file);
        s->to_dst_file = NULL;
    }

    assert((s->state != MIGRATION_STATUS_ACTIVE) &&
           (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    notifier_list_notify(&migration_state_notifiers, s);
}

void migrate_fd_error(MigrationState *s)
{
    trace_migrate_fd_error();
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    notifier_list_notify(&migration_state_notifiers, s);
}

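/*
 * Ask an in-progress outgoing migration to cancel; shuts down the return
 * path and, if necessary, the outgoing stream so a blocked write cannot
 * prevent cancellation.
 */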
static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;
    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* shutdown the rp socket, so causing the rp thread to shutdown */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

886 887
    do {
        old_state = s->state;
888
        if (!migration_is_setup_or_active(old_state)) {
889 890
            break;
        }
891
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
892
    } while (s->state != MIGRATION_STATUS_CANCELLING);
893 894 895 896 897 898 899 900

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
901
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
902 903
        qemu_file_shutdown(f);
    }
904 905
}

906 907 908 909 910 911 912
void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
P
914 915
}

S
917
{
918
    return s->state == MIGRATION_STATUS_SETUP;
919 920
}

921
bool migration_has_finished(MigrationState *s)
922
{
923
    return s->state == MIGRATION_STATUS_COMPLETED;
924
}
925

926 927
bool migration_has_failed(MigrationState *s)
{
928 929
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
930 931
}

932 933 934 935 936
bool migration_in_postcopy(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

bool migration_in_postcopy_after_devices(MigrationState *s)
{
    return migration_in_postcopy(s) && s->postcopy_after_devices;
}

MigrationState *migrate_init(const MigrationParams *params)
{
    MigrationState *s = migrate_get_current();

    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->bytes_xfer = 0;
    s->xfer_limit = 0;
    s->cleanup_bh = 0;
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->params = *params;
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->dirty_pages_rate = 0;
    s->dirty_bytes_rate = 0;
    s->setup_time = 0;
    s->dirty_sync_count = 0;
    s->start_postcopy = false;
    s->postcopy_after_devices = false;
    s->migration_thread_running = false;
    s->last_req_rb = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    QSIMPLEQ_INIT(&s->src_page_requests);

    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    return s;
}

static GSList *migration_blockers;

void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

bool migration_is_blocked(Error **errp)
{
    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (migration_blockers) {
        *errp = error_copy(migration_blockers->data);
        return true;
    }

    return false;
}

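/*
 * QMP 'migrate' command: validate the current state, pick the transport
 * from the URI scheme and start the outgoing migration.
 */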
void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    MigrationParams params;
    const char *p;

    params.blk = has_blk && blk;
    params.shared = has_inc && inc;

    if (migration_is_setup_or_active(s->state) ||
        s->state == MIGRATION_STATUS_CANCELLING) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (migration_is_blocked(errp)) {
        return;
    }

    s = migrate_init(&params);

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
#endif
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        return;
    }

    if (local_err) {
        migrate_fd_error(s);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

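/*
 * QMP handler: resize the XBZRLE cache, rejecting values that would
 * truncate, exceed guest RAM size or fall below the page size.
 */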
void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size ");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationState *s;

    if (value < 0) {
        value = 0;
    }
    if (value > SIZE_MAX) {
        value = SIZE_MAX;
    }

    s = migrate_get_current();
    s->bandwidth_limit = value;
    if (s->to_dst_file) {
        qemu_file_set_rate_limit(s->to_dst_file,
                                 s->bandwidth_limit / XFER_LIMIT_RATIO);
    }
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
    value *= 1e9;
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;
}

bool migrate_postcopy_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
}

bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
}

int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
}

bool migrate_use_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

/* migration thread support */
/*
 * Something bad happened to the RP stream, mark an error
 * The caller shall print or trace something to indicate why
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}

static struct rp_cmd_args {
    ssize_t     len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};

/*
 * Process a request for pages received on the return path,
 * We're allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
                                       ram_addr_t start, size_t len)
{
    long our_host_ps = getpagesize();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (start & (our_host_ps-1) ||
       (len & (our_host_ps-1))) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(ms, rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

/*
 * Handles messages sent on the return path towards the source VM
 *
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    uint8_t buf[512];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t  len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                    header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
            header_len != rp_cmd_args[header_type].len) ||
            header_len > sizeof(buf)) {
            error_report("RP: Received '%s' message (0x%04x) with "
                    "incorrect length %d expecting %zu",
                    rp_cmd_args[header_type].name, header_type, header_len,
                    (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = be32_to_cpup((uint32_t *)buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = be32_to_cpup((uint32_t *)buf);
            trace_source_return_path_thread_pong(tmp32);
            break;

        case MIG_RP_MSG_REQ_PAGES:
            start = be64_to_cpup((uint64_t *)buf);
            len = be32_to_cpup((uint32_t *)(buf + 8));
            migrate_handle_rp_req_pages(ms, NULL, start, len);
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
            expected_len = 12 + 1; /* header + termination */

            if (header_len >= expected_len) {
                start = be64_to_cpup((uint64_t *)buf);
                len = be32_to_cpup((uint32_t *)(buf + 8));
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_report("RP: Req_Page_id with length %d expecting %zd",
                        header_len, expected_len);
                mark_source_rp_bad(ms);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
            break;

        default:
            break;
        }
    }
    if (qemu_file_get_error(rp)) {
        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
out:
    ms->rp_state.from_dst_file = NULL;
    qemu_fclose(rp);
    return NULL;
}

static int open_return_path_on_source(MigrationState *ms)
{

    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
    if (!ms->rp_state.from_dst_file) {
        return -1;
    }

    trace_open_return_path_on_source();
    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);

    trace_open_return_path_on_source_continue();

    return 0;
}

/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    }
    trace_await_return_path_close_on_source_joining();
    qemu_thread_join(&ms->rp_state.rp_thread);
    trace_await_return_path_close_on_source_close();
    return ms->rp_state.error;
}

/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
static int postcopy_start(MigrationState *ms, bool *old_vm_running)
{
    int ret;
    const QEMUSizedBuffer *qsb;
    int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    trace_postcopy_start();
    qemu_mutex_lock_iothread();
    trace_postcopy_start_set_run();

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
    *old_vm_running = runstate_is_running();
    global_state_store();
    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_inactivate_all();
    if (ret < 0) {
        goto fail;
    }

    /*
     * Cause any non-postcopiable, but iterative devices to
     * send out their final data.
     */
    qemu_savevm_state_complete_precopy(ms->to_dst_file, true);

    /*
     * in Finish migrate and with the io-lock held everything should
     * be quiet, but we've potentially still got dirty pages and we
     * need to tell the destination to throw any pages it's already received
     * that are dirty
     */
    if (ram_postcopy_send_discard_bitmap(ms)) {
        error_report("postcopy send discard bitmap failed");
        goto fail;
    }

    /*
     * send rest of state - note things that are doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here
     */
    qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
    /* Ping just for debugging, helps line traces up */
    qemu_savevm_send_ping(ms->to_dst_file, 2);

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it.  Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each devices load-state code (especially the open
     * coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use a qemu_buf to hold the whole of the device state.
     */
    QEMUFile *fb = qemu_bufopen("w", NULL);
    if (!fb) {
        error_report("Failed to create buffered file");
        goto fail;
    }

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb, false);
    qemu_savevm_send_ping(fb, 3);

    qemu_savevm_send_postcopy_run(fb);

    /* <><> end of stuff going into the package */
    qsb = qemu_buf_get(fb);

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->to_dst_file, qsb)) {
        goto fail_closefb;
    }
    qemu_fclose(fb);

    /* Send a notify to give a chance for anything that needs to happen
     * at the transition to postcopy and after the device state; in particular
     * spice needs to trigger a transition now
     */
    ms->postcopy_after_devices = true;
    notifier_list_notify(&migration_state_notifiers, ms);

    ms->downtime =  qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;

    qemu_mutex_unlock_iothread();

    /*
     * Although this ping is just for debug, it could potentially be
     * used for getting a better measurement of downtime at the source.
     */
    qemu_savevm_send_ping(ms->to_dst_file, 4);

    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored");
        migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                              MIGRATION_STATUS_FAILED);
    }

    return ret;

fail_closefb:
    qemu_fclose(fb);
fail:
    migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                          MIGRATION_STATUS_FAILED);
    qemu_mutex_unlock_iothread();
    return -1;
}

/**
 * migration_completion: Used by migration_thread when there's not much left.
 *   The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 * @current_active_state: The migration state we expect to be in
 * @*old_vm_running: Pointer to old_vm_running flag
 * @*start_time: Pointer to time to update
 */
static void migration_completion(MigrationState *s, int current_active_state,
                                 bool *old_vm_running,
                                 int64_t *start_time)
{
    int ret;

    if (s->state == MIGRATION_STATUS_ACTIVE) {
        qemu_mutex_lock_iothread();
        *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
        *old_vm_running = runstate_is_running();
        ret = global_state_store();

        if (!ret) {
            ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
            if (ret >= 0) {
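                /* Give up ownership of the block devices so the
                 * destination can safely take over the images */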
                ret = bdrv_inactivate_all();
            }
            if (ret >= 0) {
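                /* Lift the bandwidth cap for the final device-state transfer */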
                qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
                qemu_savevm_state_complete_precopy(s->to_dst_file, false);
            }
        }
        qemu_mutex_unlock_iothread();

        if (ret < 0) {
            goto fail;
        }
    } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        trace_migration_completion_postcopy_end();

        qemu_savevm_state_complete_postcopy(s->to_dst_file);
        trace_migration_completion_postcopy_end_after_complete();
    }

    /*
     * If rp was opened we must clean up the thread before
     * cleaning everything else up (since if there are no failures
     * it will wait for the destination to send its status in
     * a SHUT command).
     * Postcopy opens rp if enabled (even if it's not activated)
     */
    if (migrate_postcopy_ram()) {
        int rp_error;
        trace_migration_completion_postcopy_end_before_rp();
        rp_error = await_return_path_close_on_source(s);
        trace_migration_completion_postcopy_end_after_rp(rp_error);
        if (rp_error) {
            goto fail_invalidate;
        }
    }

    if (qemu_file_get_error(s->to_dst_file)) {
        trace_migration_completion_file_err();
        goto fail_invalidate;
    }

    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_COMPLETED);
    return;

fail_invalidate:
    /* If not doing postcopy, vm_start() will be called: let's regain
     * control of the images.
     */
    if (s->state == MIGRATION_STATUS_ACTIVE) {
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }

fail:
    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_FAILED);
}

/*
 * Master migration thread on the source VM.
 * It drives the migration and pumps the data down the outgoing channel.
 */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    /* Used by the bandwidth calcs, updated later */
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    int64_t max_size = 0;
    int64_t start_time = initial_time;
    int64_t end_time;
    bool old_vm_running = false;
    bool entered_postcopy = false;
    /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
    enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;

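    /* The outgoing migration runs in its own thread; register it with RCU
     * so it can safely use rcu_read_lock() */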
    rcu_register_thread();

    qemu_savevm_state_header(s->to_dst_file);

    if (migrate_postcopy_ram()) {
        /* Now tell the dest that it should open its end so it can reply */
        qemu_savevm_send_open_return_path(s->to_dst_file);

        /* And do a ping that will make stuff easier to debug */
        qemu_savevm_send_ping(s->to_dst_file, 1);

        /*
         * Tell the destination that we *might* want to do postcopy later;
         * if the other end can't do postcopy it should fail now, nice and
         * early.
         */
        qemu_savevm_send_postcopy_advise(s->to_dst_file);
    }

    qemu_savevm_state_begin(s->to_dst_file, &s->params);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    current_active_state = MIGRATION_STATUS_ACTIVE;
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_ACTIVE);

    trace_migration_thread_setup_complete();

    while (s->state == MIGRATION_STATUS_ACTIVE ||
           s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->to_dst_file)) {
            uint64_t pend_post, pend_nonpost;

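            /* pend_nonpost: state that must be sent before the end of
             * precopy; pend_post: state that could be left to postcopy */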
            qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_nonpost,
                                      &pend_post);
            pending_size = pend_nonpost + pend_post;
            trace_migrate_pending(pending_size, max_size,
                                  pend_post, pend_nonpost);
            if (pending_size && pending_size >= max_size) {
                /* Still a significant amount to transfer */

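                /* Switch to postcopy once the user has requested it
                 * (start_postcopy) and the non-postcopiable state fits
                 * within one final downtime-sized burst */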
                if (migrate_postcopy_ram() &&
                    s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
                    pend_nonpost <= max_size &&
                    atomic_read(&s->start_postcopy)) {

                    if (!postcopy_start(s, &old_vm_running)) {
                        current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
                        entered_postcopy = true;
                    }

                    continue;
                }
                /* Just another iteration step */
                qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
            } else {
                trace_migration_thread_low_pending(pending_size);
                migration_completion(s, current_active_state,
                                     &old_vm_running, &start_time);
                break;
            }
        }

        if (qemu_file_get_error(s->to_dst_file)) {
            migrate_set_state(&s->state, current_active_state,
                              MIGRATION_STATUS_FAILED);
            trace_migration_thread_file_err();
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
                                         initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = (double)transferred_bytes / time_spent;
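            /* bandwidth is in bytes/ms and max_downtime in ns, so max_size
             * is the number of bytes we may transfer within the allowed
             * downtime */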
            max_size = bandwidth * migrate_max_downtime() / 1000000;

            s->mbps = (((double) transferred_bytes * 8.0) /
                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, max_size);
            /* If we haven't sent anything, we don't want to recalculate;
               10000 is a small enough number for our purposes */
            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
            }

            qemu_file_reset_rate_limit(s->to_dst_file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->to_dst_file);
        }
        if (qemu_file_rate_limit(s->to_dst_file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
        }
    }

    trace_migration_thread_after_loop();
    /* If we enabled cpu throttling for auto-converge, turn it off. */
    cpu_throttle_stop();
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    qemu_mutex_lock_iothread();
    qemu_savevm_state_cleanup();
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
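        /* total_time held the start timestamp until now; convert it into
         * the elapsed migration time */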
        s->total_time = end_time - s->total_time;
        if (!entered_postcopy) {
            s->downtime = end_time - start_time;
        }
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        if (old_vm_running && !entered_postcopy) {
            vm_start();
        }
    }
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    rcu_unregister_thread();
    return NULL;
}

void migrate_fd_connect(MigrationState *s)
{
    /* This is a best first approximation; convert max_downtime (ns) to ms */
    s->expected_downtime = max_downtime/1000000;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_blocking(s->to_dst_file, true);
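    /* The rate limit is applied per BUFFER_DELAY window, hence the division
     * by XFER_LIMIT_RATIO */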
    qemu_file_set_rate_limit(s->to_dst_file,
                             s->bandwidth_limit / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    /*
     * Open the return path; currently for postcopy but other things might
     * also want it.
     */
    if (migrate_postcopy_ram()) {
        if (open_return_path_on_source(s)) {
            error_report("Unable to open return-path for postcopy");
            migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                              MIGRATION_STATUS_FAILED);
            migrate_fd_cleanup(s);
            return;
        }
    }

    migrate_compress_threads_create();
    qemu_thread_create(&s->thread, "migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
    s->migration_thread_running = true;
}

PostcopyState  postcopy_state_get(void)
{
    return atomic_mb_read(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return atomic_xchg(&incoming_postcopy_state, new_state);
}