Commit 4e1b31db authored by Peter Maydell

Merge remote-tracking branch 'remotes/juanquintela/tags/migration/20171023' into staging

migration/next for 20171023

# gpg: Signature made Mon 23 Oct 2017 17:05:14 BST
# gpg:                using RSA key 0xF487EF185872D723
# gpg: Good signature from "Juan Quintela <quintela@redhat.com>"
# gpg:                 aka "Juan Quintela <quintela@trasno.org>"
# Primary key fingerprint: 1899 FF8E DEBF 58CC EE03  4B82 F487 EF18 5872 D723

* remotes/juanquintela/tags/migration/20171023: (21 commits)
  migration: Improve migration thread error handling
  qapi: Fix grammar in x-multifd-page-count descriptions
  migration: add bitmap for received page
  migration: introduce qemu_ufd_copy_ioctl helper
  migration: postcopy_place_page factoring out
  migration: new ram_init_bitmaps()
  migration: clean up xbzrle cache init/destroy
  migration: provide ram_state_cleanup
  migration: provide ram_state_init()
  migration: pause-before-switchover for postcopy
  migration: allow cancel to unpause
  migrate: HMP migate_continue
  migration: migrate-continue
  migration: Wait for semaphore before completing migration
  migration: Add 'pre-switchover' and 'device' statuses
  migration: Add 'pause-before-switchover' capability
  migration: Make cache_init() take an error parameter
  migration: Move xbzrle cache resize error handling to xbzrle_cache_resize
  migration: Make cache size elements use the right types
  migratiom: Remove max_item_age parameter
  ...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
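
The headline feature of this pull is the pause-before-switchover capability; the
intended QMP flow looks roughly like this (a sketch based on the schema
additions below — the destination URI and exact ordering are illustrative):

    -> { "execute": "migrate-set-capabilities", "arguments":
         { "capabilities": [
           { "capability": "pause-before-switchover", "state": true } ] } }
    <- { "return": {} }
    -> { "execute": "migrate", "arguments": { "uri": "tcp:dest:4444" } }
    <- { "return": {} }
    ... poll "query-migrate" until "status" is "pre-switchover" ...
    -> { "execute": "migrate-continue", "arguments":
         { "state": "pre-switchover" } }
    <- { "return": {} }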
@@ -959,7 +959,19 @@ STEXI
 @item migrate_cancel
 @findex migrate_cancel
 Cancel the current VM migration.
 ETEXI

+    {
+        .name       = "migrate_continue",
+        .args_type  = "state:s",
+        .params     = "state",
+        .help       = "Continue migration from the given paused state",
+        .cmd        = hmp_migrate_continue,
+    },
+STEXI
+@item migrate_continue @var{state}
+@findex migrate_continue
+Continue migration from the paused state @var{state}
+ETEXI
+
     {
......
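The same flow from the human monitor (an illustrative transcript; the
destination URI is a stand-in):

    (qemu) migrate_set_capability pause-before-switchover on
    (qemu) migrate -d tcp:dest:4444
    (qemu) info migrate     # eventually shows "Migration status: pre-switchover"
    (qemu) migrate_continue pre-switchover
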
@@ -1495,6 +1495,19 @@ void hmp_migrate_cancel(Monitor *mon, const QDict *qdict)
     qmp_migrate_cancel(NULL);
 }

+void hmp_migrate_continue(Monitor *mon, const QDict *qdict)
+{
+    Error *err = NULL;
+    const char *state = qdict_get_str(qdict, "state");
+    int val = qapi_enum_parse(&MigrationStatus_lookup, state, -1, &err);
+
+    if (val >= 0) {
+        qmp_migrate_continue(val, &err);
+    }
+
+    hmp_handle_error(mon, &err);
+}
+
 void hmp_migrate_incoming(Monitor *mon, const QDict *qdict)
 {
     Error *err = NULL;
......
@@ -68,6 +68,7 @@ void hmp_savevm(Monitor *mon, const QDict *qdict);
 void hmp_delvm(Monitor *mon, const QDict *qdict);
 void hmp_info_snapshots(Monitor *mon, const QDict *qdict);
 void hmp_migrate_cancel(Monitor *mon, const QDict *qdict);
+void hmp_migrate_continue(Monitor *mon, const QDict *qdict);
 void hmp_migrate_incoming(Monitor *mon, const QDict *qdict);
 void hmp_migrate_set_downtime(Monitor *mon, const QDict *qdict);
 void hmp_migrate_set_speed(Monitor *mon, const QDict *qdict);
......
@@ -47,6 +47,8 @@ struct RAMBlock {
      * of the postcopy phase
      */
     unsigned long *unsentmap;
+    /* bitmap of already received pages in postcopy */
+    unsigned long *receivedmap;
 };

 static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
@@ -60,6 +62,14 @@ static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
     return (char *)block->host + offset;
 }

+static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
+                                                            RAMBlock *rb)
+{
+    uint64_t host_addr_offset =
+            (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
+    return host_addr_offset >> TARGET_PAGE_BITS;
+}
+
 long qemu_getrampagesize(void);
 unsigned long last_ram_page(void);
 RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
......
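The helper above is plain pointer arithmetic: the byte offset of host_addr
within the block, shifted down to a target-page index. A standalone sanity
check of that math (4 KiB target pages, i.e. TARGET_PAGE_BITS == 12, is an
assumption of this sketch, not a constant taken from the patch):

    #include <assert.h>
    #include <stdint.h>

    #define TARGET_PAGE_BITS 12   /* assumed 4 KiB target pages */

    int main(void)
    {
        static char block[1 << 20];                 /* stand-in for rb->host */
        char *host_addr = block + (18 << TARGET_PAGE_BITS); /* 18 pages in */
        uint64_t off =
            (uint64_t)(uintptr_t)(host_addr - block) >> TARGET_PAGE_BITS;
        assert(off == 18);                          /* bit 18 of receivedmap */
        return 0;
    }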
@@ -104,6 +104,9 @@ enum mig_rp_message_type {
 static MigrationState *current_migration;

 static bool migration_object_check(MigrationState *ms, Error **errp);
+static int migration_maybe_pause(MigrationState *s,
+                                 int *current_active_state,
+                                 int new_state);

 void migration_object_init(void)
 {
@@ -526,6 +529,8 @@ static bool migration_is_setup_or_active(int state)
     case MIGRATION_STATUS_ACTIVE:
     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
     case MIGRATION_STATUS_SETUP:
+    case MIGRATION_STATUS_PRE_SWITCHOVER:
+    case MIGRATION_STATUS_DEVICE:
         return true;

     default:
@@ -600,6 +605,8 @@ MigrationInfo *qmp_query_migrate(Error **errp)
     case MIGRATION_STATUS_ACTIVE:
     case MIGRATION_STATUS_CANCELLING:
     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
+    case MIGRATION_STATUS_PRE_SWITCHOVER:
+    case MIGRATION_STATUS_DEVICE:
         /* TODO add some postcopy stats */
         info->has_status = true;
         info->has_total_time = true;
@@ -865,6 +872,12 @@ static void migrate_params_test_apply(MigrateSetParameters *params,
     if (params->has_block_incremental) {
         dest->block_incremental = params->block_incremental;
     }
+    if (params->has_x_multifd_channels) {
+        dest->x_multifd_channels = params->x_multifd_channels;
+    }
+    if (params->has_x_multifd_page_count) {
+        dest->x_multifd_page_count = params->x_multifd_page_count;
+    }
 }

 static void migrate_params_apply(MigrateSetParameters *params)
@@ -1071,19 +1084,30 @@ static void migrate_fd_cleanup(void *opaque)
                           MIGRATION_STATUS_CANCELLED);
     }

+    if (s->error) {
+        /* It is used on info migrate.  We can't free it */
+        error_report_err(error_copy(s->error));
+    }
     notifier_list_notify(&migration_state_notifiers, s);
     block_cleanup_parameters(s);
 }

+void migrate_set_error(MigrationState *s, const Error *error)
+{
+    qemu_mutex_lock(&s->error_mutex);
+    if (!s->error) {
+        s->error = error_copy(error);
+    }
+    qemu_mutex_unlock(&s->error_mutex);
+}
+
 void migrate_fd_error(MigrationState *s, const Error *error)
 {
     trace_migrate_fd_error(error_get_pretty(error));
     assert(s->to_dst_file == NULL);
     migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                       MIGRATION_STATUS_FAILED);
-    if (!s->error) {
-        s->error = error_copy(error);
-    }
+    migrate_set_error(s, error);
     notifier_list_notify(&migration_state_notifiers, s);
     block_cleanup_parameters(s);
 }
@@ -1104,6 +1128,10 @@ static void migrate_fd_cancel(MigrationState *s)
         if (!migration_is_setup_or_active(old_state)) {
             break;
         }
+        /* If the migration is paused, kick it out of the pause */
+        if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
+            qemu_sem_post(&s->pause_sem);
+        }
         migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
     } while (s->state != MIGRATION_STATUS_CANCELLING);
@@ -1183,6 +1211,8 @@ bool migration_is_idle(void)
     case MIGRATION_STATUS_ACTIVE:
     case MIGRATION_STATUS_POSTCOPY_ACTIVE:
     case MIGRATION_STATUS_COLO:
+    case MIGRATION_STATUS_PRE_SWITCHOVER:
+    case MIGRATION_STATUS_DEVICE:
         return false;
     case MIGRATION_STATUS__MAX:
         g_assert_not_reached();
@@ -1362,29 +1392,24 @@ void qmp_migrate_cancel(Error **errp)
     migrate_fd_cancel(migrate_get_current());
 }

+void qmp_migrate_continue(MigrationStatus state, Error **errp)
+{
+    MigrationState *s = migrate_get_current();
+
+    if (s->state != state) {
+        error_setg(errp, "Migration not in expected state: %s",
+                   MigrationStatus_str(s->state));
+        return;
+    }
+    qemu_sem_post(&s->pause_sem);
+}
+
 void qmp_migrate_set_cache_size(int64_t value, Error **errp)
 {
     MigrationState *s = migrate_get_current();
     int64_t new_size;

-    /* Check for truncation */
-    if (value != (size_t)value) {
-        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
-                   "exceeding address space");
-        return;
-    }
-
-    /* Cache should not be larger than guest ram size */
-    if (value > ram_bytes_total()) {
-        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
-                   "exceeds guest ram size ");
-        return;
-    }
-
-    new_size = xbzrle_cache_resize(value);
+    new_size = xbzrle_cache_resize(value, errp);
     if (new_size < 0) {
-        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
-                   "is smaller than page size");
         return;
     }
@@ -1521,6 +1546,16 @@ bool migrate_use_multifd(void)
     return s->enabled_capabilities[MIGRATION_CAPABILITY_X_MULTIFD];
 }

+bool migrate_pause_before_switchover(void)
+{
+    MigrationState *s;
+
+    s = migrate_get_current();
+
+    return s->enabled_capabilities[
+        MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER];
+}
+
 int migrate_multifd_channels(void)
 {
     MigrationState *s;
@@ -1799,8 +1834,11 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
     QEMUFile *fb;
     int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
     bool restart_block = false;
-    migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
-                      MIGRATION_STATUS_POSTCOPY_ACTIVE);
+    int cur_state = MIGRATION_STATUS_ACTIVE;
+    if (!migrate_pause_before_switchover()) {
+        migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
+                          MIGRATION_STATUS_POSTCOPY_ACTIVE);
+    }

     trace_postcopy_start();
     qemu_mutex_lock_iothread();
goto fail; goto fail;
} }
ret = migration_maybe_pause(ms, &cur_state,
MIGRATION_STATUS_POSTCOPY_ACTIVE);
if (ret < 0) {
goto fail;
}
ret = bdrv_inactivate_all(); ret = bdrv_inactivate_all();
if (ret < 0) { if (ret < 0) {
goto fail; goto fail;
@@ -1951,6 +1995,41 @@ fail:
     return -1;
 }

+/**
+ * migration_maybe_pause: Pause if required to by
+ * migrate_pause_before_switchover called with the iothread locked
+ * Returns: 0 on success
+ */
+static int migration_maybe_pause(MigrationState *s,
+                                 int *current_active_state,
+                                 int new_state)
+{
+    if (!migrate_pause_before_switchover()) {
+        return 0;
+    }
+
+    /* Since leaving this state is not atomic with posting the semaphore
+     * it's possible that someone could have issued multiple migrate_continue
+     * and the semaphore is incorrectly positive at this point;
+     * the docs say it's undefined to reinit a semaphore that's already
+     * init'd, so use timedwait to eat up any existing posts.
+     */
+    while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
+        /* This block intentionally left blank */
+    }
+
+    qemu_mutex_unlock_iothread();
+    migrate_set_state(&s->state, *current_active_state,
+                      MIGRATION_STATUS_PRE_SWITCHOVER);
+    qemu_sem_wait(&s->pause_sem);
+    migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
+                      new_state);
+    *current_active_state = new_state;
+    qemu_mutex_lock_iothread();
+
+    return s->state == new_state ? 0 : -EINVAL;
+}
+
 /**
  * migration_completion: Used by migration_thread when there's not much left.
  * The caller 'breaks' the loop when this returns.
@@ -1976,6 +2055,10 @@ static void migration_completion(MigrationState *s, int current_active_state,
     if (!ret) {
         bool inactivate = !migrate_colo_enabled();
         ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
+        if (ret >= 0) {
+            ret = migration_maybe_pause(s, &current_active_state,
+                                        MIGRATION_STATUS_DEVICE);
+        }
         if (ret >= 0) {
             qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
             ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
@@ -2355,8 +2438,10 @@ static void migration_instance_finalize(Object *obj)
     MigrationState *ms = MIGRATION_OBJ(obj);
     MigrationParameters *params = &ms->parameters;

+    qemu_mutex_destroy(&ms->error_mutex);
     g_free(params->tls_hostname);
     g_free(params->tls_creds);
+    qemu_sem_destroy(&ms->pause_sem);
 }

 static void migration_instance_init(Object *obj)
static void migration_instance_init(Object *obj) static void migration_instance_init(Object *obj)
...@@ -2367,6 +2452,8 @@ static void migration_instance_init(Object *obj) ...@@ -2367,6 +2452,8 @@ static void migration_instance_init(Object *obj)
ms->state = MIGRATION_STATUS_NONE; ms->state = MIGRATION_STATUS_NONE;
ms->xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE; ms->xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE;
ms->mbps = -1; ms->mbps = -1;
qemu_sem_init(&ms->pause_sem, 0);
qemu_mutex_init(&ms->error_mutex);
params->tls_hostname = g_strdup(""); params->tls_hostname = g_strdup("");
params->tls_creds = g_strdup(""); params->tls_creds = g_strdup("");
......
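The drain-then-wait trick in migration_maybe_pause() is worth seeing in
isolation; a minimal POSIX sketch of the same pattern, using plain sem_t in
place of QemuSemaphore:

    #include <semaphore.h>
    #include <stdio.h>
    #include <time.h>

    static void drain(sem_t *sem)
    {
        struct timespec ts;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_nsec += 1000000;          /* ~1ms, like qemu_sem_timedwait(sem, 1) */
        if (ts.tv_nsec >= 1000000000L) {
            ts.tv_sec += 1;
            ts.tv_nsec -= 1000000000L;
        }
        while (sem_timedwait(sem, &ts) == 0) {
            /* intentionally empty: discard one stale post per iteration */
        }
    }

    int main(void)
    {
        sem_t sem;
        int value;

        sem_init(&sem, 0, 0);
        sem_post(&sem);                 /* two stale "continue"s ... */
        sem_post(&sem);
        drain(&sem);                    /* ... both eaten here */
        sem_getvalue(&sem, &value);
        printf("semaphore value after drain: %d\n", value);  /* prints 0 */
        sem_destroy(&sem);
        return 0;
    }

After the drain, a following sem_wait() really does block until the next
post, which is exactly what the pre-switchover pause relies on.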
@@ -121,6 +121,9 @@ struct MigrationState
     /* Flag set once the migration thread called bdrv_inactivate_all */
     bool block_inactive;

+    /* Migration is paused due to pause-before-switchover */
+    QemuSemaphore pause_sem;
+
     /* The semaphore is used to notify COLO thread that failover is finished */
     QemuSemaphore colo_exit_sem;
@@ -129,8 +132,12 @@
     int64_t colo_checkpoint_time;
     QEMUTimer *colo_delay_timer;

-    /* The last error that occurred */
+    /* The first error that has occurred.
+       We used the mutex to be able to return the 1st error message */
     Error *error;
+    /* mutex to protect errp */
+    QemuMutex error_mutex;
+
     /* Do we have to clean up -b/-i from old migrate parameters */
     /* This feature is deprecated and will be removed */
     bool must_remove_block_options;
@@ -159,6 +166,7 @@ bool migration_has_all_channels(void);

 uint64_t migrate_max_downtime(void);

+void migrate_set_error(MigrationState *s, const Error *error);
 void migrate_fd_error(MigrationState *s, const Error *error);

 void migrate_fd_connect(MigrationState *s);
@@ -177,6 +185,7 @@ bool migrate_zero_blocks(void);

 bool migrate_auto_converge(void);
 bool migrate_use_multifd(void);
+bool migrate_pause_before_switchover(void);
 int migrate_multifd_channels(void);
 int migrate_multifd_page_count(void);
......
@@ -14,6 +14,8 @@

 #include "qemu/osdep.h"

+#include "qapi/qmp/qerror.h"
+#include "qapi/error.h"
 #include "qemu-common.h"
 #include "qemu/host-utils.h"
 #include "migration/page_cache.h"
@@ -39,27 +41,28 @@ struct CacheItem {

 struct PageCache {
     CacheItem *page_cache;
-    unsigned int page_size;
-    int64_t max_num_items;
-    uint64_t max_item_age;
-    int64_t num_items;
+    size_t page_size;
+    size_t max_num_items;
+    size_t num_items;
 };

-PageCache *cache_init(int64_t num_pages, unsigned int page_size)
+PageCache *cache_init(int64_t new_size, size_t page_size, Error **errp)
 {
     int64_t i;
+    size_t num_pages = new_size / page_size;
     PageCache *cache;

-    if (num_pages <= 0) {
-        DPRINTF("invalid number of pages\n");
+    if (new_size < page_size) {
+        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
+                   "is smaller than one target page size");
         return NULL;
     }

     /* We prefer not to abort if there is no memory */
     cache = g_try_malloc(sizeof(*cache));
     if (!cache) {
-        DPRINTF("Failed to allocate cache\n");
+        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
+                   "Failed to allocate cache");
         return NULL;
     }
     /* round down to the nearest power of 2 */
@@ -69,7 +72,6 @@ PageCache *cache_init(int64_t num_pages, unsigned int page_size)
     }
     cache->page_size = page_size;
     cache->num_items = 0;
-    cache->max_item_age = 0;
     cache->max_num_items = num_pages;

     DPRINTF("Setting cache buckets to %" PRId64 "\n", cache->max_num_items);
@@ -78,7 +80,8 @@ PageCache *cache_init(int64_t num_pages, unsigned int page_size)
     cache->page_cache = g_try_malloc((cache->max_num_items) *
                                      sizeof(*cache->page_cache));
     if (!cache->page_cache) {
-        DPRINTF("Failed to allocate cache->page_cache\n");
+        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
+                   "Failed to allocate page cache");
         g_free(cache);
         return NULL;
     }
......
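cache_init() now follows QEMU's usual Error ** out-parameter convention: the
callee records the first failure, and the caller decides whether to report or
propagate it. A toy standalone sketch of that convention (the underscored
names are illustrative stand-ins, not QEMU APIs):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Error { const char *msg; } Error;

    static void error_setg_(Error **errp, const char *msg)
    {
        if (errp && !*errp) {       /* first error wins, later ones ignored */
            *errp = malloc(sizeof(**errp));
            (*errp)->msg = msg;
        }
    }

    static void *cache_init_(long new_size, long page_size, Error **errp)
    {
        if (new_size < page_size) {
            error_setg_(errp, "cache size is smaller than one target page size");
            return NULL;
        }
        return calloc(1, (size_t)new_size);
    }

    int main(void)
    {
        Error *err = NULL;
        void *cache = cache_init_(1024, 4096, &err);

        if (!cache) {               /* caller decides: report or propagate */
            fprintf(stderr, "cache_init failed: %s\n", err->msg);
            free(err);
            return 1;
        }
        free(cache);
        return 0;
    }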
@@ -24,12 +24,11 @@ typedef struct PageCache PageCache;
  *
  * Returns new allocated cache or NULL on error
  *
- * @cache pointer to the PageCache struct
- * @num_pages: cache maximal number of cached pages
+ * @cache_size: cache size in bytes
  * @page_size: cache page size
+ * @errp: set *errp if the check failed, with reason
  */
-PageCache *cache_init(int64_t num_pages, unsigned int page_size);
+PageCache *cache_init(int64_t cache_size, size_t page_size, Error **errp);

 /**
  * cache_fini: free all cache resources
  * @cache pointer to the PageCache struct
......
@@ -641,26 +641,46 @@ int postcopy_ram_enable_notify(MigrationIncomingState *mis)
     return 0;
 }

+static int qemu_ufd_copy_ioctl(int userfault_fd, void *host_addr,
+                               void *from_addr, uint64_t pagesize, RAMBlock *rb)
+{
+    int ret;
+    if (from_addr) {
+        struct uffdio_copy copy_struct;
+        copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
+        copy_struct.src = (uint64_t)(uintptr_t)from_addr;
+        copy_struct.len = pagesize;
+        copy_struct.mode = 0;
+        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
+    } else {
+        struct uffdio_zeropage zero_struct;
+        zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
+        zero_struct.range.len = pagesize;
+        zero_struct.mode = 0;
+        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
+    }
+    if (!ret) {
+        ramblock_recv_bitmap_set_range(rb, host_addr,
+                                       pagesize / qemu_target_page_size());
+    }
+    return ret;
+}
+
 /*
  * Place a host page (from) at (host) atomically
  * returns 0 on success
  */
 int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
-                        size_t pagesize)
+                        RAMBlock *rb)
 {
-    struct uffdio_copy copy_struct;
-
-    copy_struct.dst = (uint64_t)(uintptr_t)host;
-    copy_struct.src = (uint64_t)(uintptr_t)from;
-    copy_struct.len = pagesize;
-    copy_struct.mode = 0;
+    size_t pagesize = qemu_ram_pagesize(rb);

     /* copy also acks to the kernel waking the stalled thread up
      * TODO: We can inhibit that ack and only do it if it was requested
      * which would be slightly cheaper, but we'd have to be careful
      * of the order of updating our page state.
      */
-    if (ioctl(mis->userfault_fd, UFFDIO_COPY, &copy_struct)) {
+    if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, from, pagesize, rb)) {
         int e = errno;
         error_report("%s: %s copy host: %p from: %p (size: %zd)",
                      __func__, strerror(e), host, from, pagesize);
@@ -677,17 +697,13 @@ int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
  * returns 0 on success
  */
 int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
-                             size_t pagesize)
+                             RAMBlock *rb)
 {
     trace_postcopy_place_page_zero(host);
-    if (pagesize == getpagesize()) {
-        struct uffdio_zeropage zero_struct;
-        zero_struct.range.start = (uint64_t)(uintptr_t)host;
-        zero_struct.range.len = getpagesize();
-        zero_struct.mode = 0;

-        if (ioctl(mis->userfault_fd, UFFDIO_ZEROPAGE, &zero_struct)) {
+    if (qemu_ram_pagesize(rb) == getpagesize()) {
+        if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, NULL, getpagesize(),
+                                rb)) {
             int e = errno;
             error_report("%s: %s zero host: %p",
                          __func__, strerror(e), host);
@@ -711,7 +727,7 @@ int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
             memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
         }
         return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
-                                   pagesize);
+                                   rb);
     }

     return 0;
@@ -774,14 +790,14 @@ int postcopy_ram_enable_notify(MigrationIncomingState *mis)
 }

 int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
-                        size_t pagesize)
+                        RAMBlock *rb)
 {
     assert(0);
     return -1;
 }

 int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
-                             size_t pagesize)
+                             RAMBlock *rb)
 {
     assert(0);
     return -1;
......
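qemu_ufd_copy_ioctl() above wraps the kernel's userfaultfd ioctls. A minimal
standalone demonstration of UFFDIO_COPY placing one page into a registered
mapping (Linux-only sketch; assumes the kernel permits userfaultfd for this
user, otherwise run it as root):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        int uffd = (int)syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        if (uffd < 0) { perror("userfaultfd"); return 1; }

        struct uffdio_api api = { .api = UFFD_API };
        if (ioctl(uffd, UFFDIO_API, &api)) { perror("UFFDIO_API"); return 1; }

        /* Fresh anonymous mapping: its pages are "missing" until populated */
        char *dst = mmap(NULL, page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (dst == MAP_FAILED) { perror("mmap"); return 1; }

        struct uffdio_register reg = {
            .range = { .start = (unsigned long)dst, .len = page },
            .mode  = UFFDIO_REGISTER_MODE_MISSING,
        };
        if (ioctl(uffd, UFFDIO_REGISTER, &reg)) {
            perror("UFFDIO_REGISTER");
            return 1;
        }

        char *src = aligned_alloc(page, page);
        memset(src, 0x5a, page);

        /* Atomically place the page; also wakes any thread stalled on it */
        struct uffdio_copy copy = {
            .dst = (unsigned long)dst,
            .src = (unsigned long)src,
            .len = page,
        };
        if (ioctl(uffd, UFFDIO_COPY, &copy)) { perror("UFFDIO_COPY"); return 1; }

        printf("dst[0] after UFFDIO_COPY: 0x%02x\n", (unsigned char)dst[0]);
        return 0;
    }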
@@ -72,14 +72,14 @@ void postcopy_discard_send_finish(MigrationState *ms,
  * returns 0 on success
  */
 int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
-                        size_t pagesize);
+                        RAMBlock *rb);

 /*
  * Place a zero page at (host) atomically
  * returns 0 on success
  */
 int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
-                             size_t pagesize);
+                             RAMBlock *rb);

 /* The current postcopy state is read/set by postcopy_state_get/set
  * which update it atomically.
......
@@ -42,8 +42,10 @@
 #include "postcopy-ram.h"
 #include "migration/page_cache.h"
 #include "qemu/error-report.h"
+#include "qapi/qmp/qerror.h"
 #include "trace.h"
 #include "exec/ram_addr.h"
+#include "exec/target_page.h"
 #include "qemu/rcu_queue.h"
 #include "migration/colo.h"
 #include "migration/block.h"
@@ -113,13 +115,24 @@ static void XBZRLE_cache_unlock(void)
  * Returns the new_size or negative in case of error.
  *
  * @new_size: new cache size
+ * @errp: set *errp if the check failed, with reason
  */
-int64_t xbzrle_cache_resize(int64_t new_size)
+int64_t xbzrle_cache_resize(int64_t new_size, Error **errp)
 {
     PageCache *new_cache;
     int64_t ret;

-    if (new_size < TARGET_PAGE_SIZE) {
+    /* Check for truncation */
+    if (new_size != (size_t)new_size) {
+        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
+                   "exceeding address space");
+        return -1;
+    }
+
+    /* Cache should not be larger than guest ram size */
+    if (new_size > ram_bytes_total()) {
+        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
+                   "exceeds guest ram size");
         return -1;
     }
@@ -129,10 +142,8 @@ int64_t xbzrle_cache_resize(int64_t new_size)
     if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
         goto out_new_size;
     }
-    new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
-                           TARGET_PAGE_SIZE);
+    new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
     if (!new_cache) {
-        error_report("Error creating cache");
         ret = -1;
         goto out;
     }
@@ -148,6 +159,35 @@ out:
     return ret;
 }

+static void ramblock_recv_map_init(void)
+{
+    RAMBlock *rb;
+
+    RAMBLOCK_FOREACH(rb) {
+        assert(!rb->receivedmap);
+        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
+    }
+}
+
+int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
+{
+    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
+                    rb->receivedmap);
+}
+
+void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
+{
+    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
+}
+
+void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
+                                    size_t nr)
+{
+    bitmap_set_atomic(rb->receivedmap,
+                      ramblock_recv_bitmap_offset(host_addr, rb),
+                      nr);
+}
+
 /*
  * An outstanding page request, on the source, having been received
  * and queued
@@ -1566,6 +1606,31 @@ static void xbzrle_load_cleanup(void)
     XBZRLE.decoded_buf = NULL;
 }

+static void ram_state_cleanup(RAMState **rsp)
+{
+    migration_page_queue_free(*rsp);
+    qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
+    qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
+    g_free(*rsp);
+    *rsp = NULL;
+}
+
+static void xbzrle_cleanup(void)
+{
+    XBZRLE_cache_lock();
+    if (XBZRLE.cache) {
+        cache_fini(XBZRLE.cache);
+        g_free(XBZRLE.encoded_buf);
+        g_free(XBZRLE.current_buf);
+        g_free(XBZRLE.zero_target_page);
+        XBZRLE.cache = NULL;
+        XBZRLE.encoded_buf = NULL;
+        XBZRLE.current_buf = NULL;
+        XBZRLE.zero_target_page = NULL;
+    }
+    XBZRLE_cache_unlock();
+}
+
 static void ram_save_cleanup(void *opaque)
 {
     RAMState **rsp = opaque;
@@ -1583,22 +1648,9 @@ static void ram_save_cleanup(void *opaque)
         block->unsentmap = NULL;
     }

-    XBZRLE_cache_lock();
-    if (XBZRLE.cache) {
-        cache_fini(XBZRLE.cache);
-        g_free(XBZRLE.encoded_buf);
-        g_free(XBZRLE.current_buf);
-        g_free(XBZRLE.zero_target_page);
-        XBZRLE.cache = NULL;
-        XBZRLE.encoded_buf = NULL;
-        XBZRLE.current_buf = NULL;
-        XBZRLE.zero_target_page = NULL;
-    }
-    XBZRLE_cache_unlock();
-    migration_page_queue_free(*rsp);
+    xbzrle_cleanup();
     compress_threads_save_cleanup();
-    g_free(*rsp);
-    *rsp = NULL;
+    ram_state_cleanup(rsp);
 }

 static void ram_state_reset(RAMState *rs)
@@ -1999,6 +2051,8 @@ int ram_discard_range(const char *rbname, uint64_t start, size_t length)
         goto err;
     }

+    bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
+                 length >> qemu_target_page_bits());
     ret = ram_block_discard_range(rb, start, length);

 err:
@@ -2007,63 +2061,96 @@ err:
     return ret;
 }

+/*
+ * For every allocation, we will try not to crash the VM if the
+ * allocation failed.
+ */
+static int xbzrle_init(void)
+{
+    Error *local_err = NULL;
+
+    if (!migrate_use_xbzrle()) {
+        return 0;
+    }
+
+    XBZRLE_cache_lock();
+
+    XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
+    if (!XBZRLE.zero_target_page) {
+        error_report("%s: Error allocating zero page", __func__);
+        goto err_out;
+    }
+
+    XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
+                              TARGET_PAGE_SIZE, &local_err);
+    if (!XBZRLE.cache) {
+        error_report_err(local_err);
+        goto free_zero_page;
+    }
+
+    XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
+    if (!XBZRLE.encoded_buf) {
+        error_report("%s: Error allocating encoded_buf", __func__);
+        goto free_cache;
+    }
+
+    XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
+    if (!XBZRLE.current_buf) {
+        error_report("%s: Error allocating current_buf", __func__);
+        goto free_encoded_buf;
+    }
+
+    /* We are all good */
+    XBZRLE_cache_unlock();
+    return 0;
+
+free_encoded_buf:
+    g_free(XBZRLE.encoded_buf);
+    XBZRLE.encoded_buf = NULL;
+free_cache:
+    cache_fini(XBZRLE.cache);
+    XBZRLE.cache = NULL;
+free_zero_page:
+    g_free(XBZRLE.zero_target_page);
+    XBZRLE.zero_target_page = NULL;
+err_out:
+    XBZRLE_cache_unlock();
+    return -ENOMEM;
+}
+
 static int ram_state_init(RAMState **rsp)
 {
-    *rsp = g_new0(RAMState, 1);
+    *rsp = g_try_new0(RAMState, 1);
+
+    if (!*rsp) {
+        error_report("%s: Init ramstate fail", __func__);
+        return -1;
+    }

     qemu_mutex_init(&(*rsp)->bitmap_mutex);
     qemu_mutex_init(&(*rsp)->src_page_req_mutex);
     QSIMPLEQ_INIT(&(*rsp)->src_page_requests);

-    if (migrate_use_xbzrle()) {
-        XBZRLE_cache_lock();
-        XBZRLE.zero_target_page = g_malloc0(TARGET_PAGE_SIZE);
-        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
-                                  TARGET_PAGE_SIZE,
-                                  TARGET_PAGE_SIZE);
-        if (!XBZRLE.cache) {
-            XBZRLE_cache_unlock();
-            error_report("Error creating cache");
-            g_free(*rsp);
-            *rsp = NULL;
-            return -1;
-        }
-        XBZRLE_cache_unlock();
-
-        /* We prefer not to abort if there is no memory */
-        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
-        if (!XBZRLE.encoded_buf) {
-            error_report("Error allocating encoded_buf");
-            g_free(*rsp);
-            *rsp = NULL;
-            return -1;
-        }
-
-        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
-        if (!XBZRLE.current_buf) {
-            error_report("Error allocating current_buf");
-            g_free(XBZRLE.encoded_buf);
-            XBZRLE.encoded_buf = NULL;
-            g_free(*rsp);
-            *rsp = NULL;
-            return -1;
-        }
-    }
-
-    /* For memory_global_dirty_log_start below.  */
-    qemu_mutex_lock_iothread();
-    qemu_mutex_lock_ramlist();
-    rcu_read_lock();
-    ram_state_reset(*rsp);
+    /*
+     * Count the total number of pages used by ram blocks not including any
+     * gaps due to alignment or unplugs.
+     */
+    (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
+
+    ram_state_reset(*rsp);
+
+    return 0;
+}
+
+static void ram_list_init_bitmaps(void)
+{
+    RAMBlock *block;
+    unsigned long pages;

     /* Skip setting bitmap if there is no RAM */
     if (ram_bytes_total()) {
-        RAMBlock *block;
-
         QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
-
+            pages = block->max_length >> TARGET_PAGE_BITS;
             block->bmap = bitmap_new(pages);
             bitmap_set(block->bmap, 0, pages);
             if (migrate_postcopy_ram()) {
@@ -2072,18 +2159,36 @@ static int ram_state_init(RAMState **rsp)
             }
         }
     }
+}

-    /*
-     * Count the total number of pages used by ram blocks not including any
-     * gaps due to alignment or unplugs.
-     */
-    (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
+static void ram_init_bitmaps(RAMState *rs)
+{
+    /* For memory_global_dirty_log_start below.  */
+    qemu_mutex_lock_iothread();
+    qemu_mutex_lock_ramlist();
+    rcu_read_lock();

+    ram_list_init_bitmaps();
     memory_global_dirty_log_start();
-    migration_bitmap_sync(*rsp);
+    migration_bitmap_sync(rs);
+
+    rcu_read_unlock();
     qemu_mutex_unlock_ramlist();
     qemu_mutex_unlock_iothread();
-    rcu_read_unlock();
+}
+
+static int ram_init_all(RAMState **rsp)
+{
+    if (ram_state_init(rsp)) {
+        return -1;
+    }
+
+    if (xbzrle_init()) {
+        ram_state_cleanup(rsp);
+        return -1;
+    }
+
+    ram_init_bitmaps(*rsp);

     return 0;
 }
@@ -2110,7 +2215,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     /* migration has already setup the bitmap, reuse it. */
     if (!migration_in_colo_state()) {
-        if (ram_state_init(rsp) != 0) {
+        if (ram_init_all(rsp) != 0) {
             return -1;
         }
     }
@@ -2534,13 +2639,20 @@ static int ram_load_setup(QEMUFile *f, void *opaque)
 {
     xbzrle_load_setup();
     compress_threads_load_setup();
+    ramblock_recv_map_init();
     return 0;
 }

 static int ram_load_cleanup(void *opaque)
 {
+    RAMBlock *rb;
+
     xbzrle_load_cleanup();
     compress_threads_load_cleanup();
+
+    RAMBLOCK_FOREACH(rb) {
+        g_free(rb->receivedmap);
+        rb->receivedmap = NULL;
+    }
     return 0;
 }
@@ -2680,10 +2792,10 @@ static int ram_load_postcopy(QEMUFile *f)
         if (all_zero) {
             ret = postcopy_place_page_zero(mis, place_dest,
-                                           block->page_size);
+                                           block);
         } else {
             ret = postcopy_place_page(mis, place_dest,
-                                      place_source, block->page_size);
+                                      place_source, block);
         }
     }
     if (!ret) {
@@ -2755,6 +2867,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
             ret = -EINVAL;
             break;
         }
+        ramblock_recv_bitmap_set(block, host);

         trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
     }
......
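Across this series, receivedmap follows a simple lifecycle: allocated at load
setup, bits set as pages arrive (ram_load, postcopy placement), bits cleared
again by ram_discard_range, and freed at load cleanup. A toy model of that
lifecycle (the hand-rolled bitmap and 4 KiB pages are assumptions of this
sketch, not QEMU's bitmap implementation):

    #include <assert.h>
    #include <limits.h>
    #include <stddef.h>

    #define PAGE_BITS     12
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static void bmap_set(unsigned long *map, size_t start, size_t nr)
    {
        for (size_t i = start; i < start + nr; i++) {
            map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
        }
    }

    static void bmap_clear(unsigned long *map, size_t start, size_t nr)
    {
        for (size_t i = start; i < start + nr; i++) {
            map[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
        }
    }

    static int bmap_test(const unsigned long *map, size_t n)
    {
        return (map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
        unsigned long map[4] = { 0 };          /* covers 256 pages on 64-bit */

        bmap_set(map, 0x12000 >> PAGE_BITS, 2);    /* two pages received */
        assert(bmap_test(map, 18) && bmap_test(map, 19));

        bmap_clear(map, 0x12000 >> PAGE_BITS, 2);  /* then discarded */
        assert(!bmap_test(map, 18));
        return 0;
    }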
@@ -35,7 +35,7 @@
 extern MigrationStats ram_counters;
 extern XBZRLECacheStats xbzrle_counters;

-int64_t xbzrle_cache_resize(int64_t new_size);
+int64_t xbzrle_cache_resize(int64_t new_size, Error **errp);
 uint64_t ram_bytes_remaining(void);
 uint64_t ram_bytes_total(void);
@@ -57,4 +57,9 @@ int ram_discard_range(const char *block_name, uint64_t start, size_t length);
 int ram_postcopy_incoming_init(MigrationIncomingState *mis);

 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size);
+
+int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr);
+void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr);
+void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr, size_t nr);
+
 #endif
@@ -119,7 +119,6 @@ static void migration_tls_outgoing_handshake(QIOTask *task,
     if (qio_task_propagate_error(task, &err)) {
         trace_migration_tls_outgoing_handshake_error(error_get_pretty(err));
         migrate_fd_error(s, err);
-        error_free(err);
     } else {
         trace_migration_tls_outgoing_handshake_complete();
         migration_channel_connect(s, ioc, NULL);
......
@@ -96,12 +96,18 @@
 # @colo: VM is in the process of fault tolerance, VM can not get into this
 #        state unless colo capability is enabled for migration. (since 2.8)
 #
+# @pre-switchover: Paused before device serialisation. (since 2.11)
+#
+# @device: During device serialisation when pause-before-switchover is enabled
+#        (since 2.11)
+#
 # Since: 2.3
 #
 ##
 { 'enum': 'MigrationStatus',
   'data': [ 'none', 'setup', 'cancelling', 'cancelled',
-            'active', 'postcopy-active', 'completed', 'failed', 'colo' ] }
+            'active', 'postcopy-active', 'completed', 'failed', 'colo',
+            'pre-switchover', 'device' ] }

 ##
 # @MigrationInfo:
@@ -341,6 +347,9 @@
 # @return-path: If enabled, migration will use the return path even
 #               for precopy. (since 2.10)
 #
+# @pause-before-switchover: Pause outgoing migration before serialising device
+#          state and before disabling block IO (since 2.11)
+#
 # @x-multifd: Use more than one fd for migration (since 2.11)
 #
 # Since: 1.2
@@ -348,7 +357,7 @@
 { 'enum': 'MigrationCapability',
   'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
            'compress', 'events', 'postcopy-ram', 'x-colo', 'release-ram',
-           'block', 'return-path', 'x-multifd' ] }
+           'block', 'return-path', 'pause-before-switchover', 'x-multifd' ] }

 ##
 # @MigrationCapabilityStatus:
@@ -471,7 +480,7 @@
 #                     number of sockets used for migration.  The
 #                     default value is 2 (since 2.11)
 #
-# @x-multifd-page-count: Number of pages sent together to a thread
+# @x-multifd-page-count: Number of pages sent together to a thread.
 #                        The default value is 16 (since 2.11)
 #
 # Since: 2.4
@@ -542,7 +551,7 @@
 #                     number of sockets used for migration.  The
 #                     default value is 2 (since 2.11)
 #
-# @x-multifd-page-count: Number of pages sent together to a thread
+# @x-multifd-page-count: Number of pages sent together to a thread.
 #                        The default value is 16 (since 2.11)
 #
 # Since: 2.4
@@ -638,7 +647,7 @@
 #                     number of sockets used for migration.
 #                     The default value is 2 (since 2.11)
 #
-# @x-multifd-page-count: Number of pages sent together to a thread
+# @x-multifd-page-count: Number of pages sent together to a thread.
 #                        The default value is 16 (since 2.11)
 #
 # Since: 2.4
@@ -867,6 +876,23 @@
 ##
 { 'command': 'migrate_cancel' }

+##
+# @migrate-continue:
+#
+# Continue migration when it's in a paused state.
+#
+# @state: The state the migration is currently expected to be in
+#
+# Returns: nothing on success
+# Since: 2.11
+# Example:
+#
+# -> { "execute": "migrate-continue" , "arguments":
+#      { "state": "pre-switchover" } }
+# <- { "return": {} }
+##
+{ 'command': 'migrate-continue', 'data': {'state': 'MigrationStatus'} }
+
 ##
 # @migrate_set_downtime:
 #
......