Commit efd1a1d6 authored by Juan Quintela

multifd: Drop x-multifd-page-count parameter

Libvirt doesn't want to expose (and explain) it.  From now on we
measure the size of packets in bytes instead of pages, so it is the
same independently of the architecture.  We choose the x86 page size.
Note that a following patch will make this value variable.
Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Markus Armbruster <armbru@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Parent 2a34ee59
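For orientation, a minimal standalone C sketch (not part of the patch) of the arithmetic described above: a multifd packet now carries a fixed byte budget, and the per-packet page count is derived from it. The page sizes below are illustrative stand-ins for QEMU's qemu_target_page_size(); with 4 KiB x86 pages the division yields 16 pages, matching the old x-multifd-page-count default.

    /* Sketch only: MULTIFD_PACKET_SIZE matches the constant this patch
     * introduces; page_sizes[] stands in for qemu_target_page_size(). */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MULTIFD_PACKET_SIZE (64 * 1024) /* fixed per-packet byte budget */

    int main(void)
    {
        uint32_t page_sizes[] = { 4096, 16384, 65536 }; /* 4K, 16K, 64K pages */
        size_t i;

        for (i = 0; i < sizeof(page_sizes) / sizeof(page_sizes[0]); i++) {
            /* The byte size stays constant; only the page count varies. */
            uint32_t page_count = MULTIFD_PACKET_SIZE / page_sizes[i];

            printf("page size %5u -> %2u pages per packet\n",
                   (unsigned)page_sizes[i], (unsigned)page_count);
        }
        return 0;
    }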
@@ -435,9 +435,6 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict)
         monitor_printf(mon, "%s: %u\n",
             MigrationParameter_str(MIGRATION_PARAMETER_X_MULTIFD_CHANNELS),
             params->x_multifd_channels);
-        monitor_printf(mon, "%s: %u\n",
-            MigrationParameter_str(MIGRATION_PARAMETER_X_MULTIFD_PAGE_COUNT),
-            params->x_multifd_page_count);
         monitor_printf(mon, "%s: %" PRIu64 "\n",
             MigrationParameter_str(MIGRATION_PARAMETER_XBZRLE_CACHE_SIZE),
             params->xbzrle_cache_size);
@@ -1816,10 +1813,6 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict)
         p->has_x_multifd_channels = true;
         visit_type_int(v, param, &p->x_multifd_channels, &err);
         break;
-    case MIGRATION_PARAMETER_X_MULTIFD_PAGE_COUNT:
-        p->has_x_multifd_page_count = true;
-        visit_type_int(v, param, &p->x_multifd_page_count, &err);
-        break;
     case MIGRATION_PARAMETER_XBZRLE_CACHE_SIZE:
         p->has_xbzrle_cache_size = true;
         visit_type_size(v, param, &cache_size, &err);
......
@@ -82,7 +82,6 @@
 /* The delay time (in ms) between two COLO checkpoints */
 #define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY (200 * 100)
 #define DEFAULT_MIGRATE_MULTIFD_CHANNELS 2
-#define DEFAULT_MIGRATE_MULTIFD_PAGE_COUNT 16
 /* Background transfer rate for postcopy, 0 means unlimited, note
  * that page requests can still exceed this limit.
  */
@@ -768,8 +767,6 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp)
     params->block_incremental = s->parameters.block_incremental;
     params->has_x_multifd_channels = true;
     params->x_multifd_channels = s->parameters.x_multifd_channels;
-    params->has_x_multifd_page_count = true;
-    params->x_multifd_page_count = s->parameters.x_multifd_page_count;
     params->has_xbzrle_cache_size = true;
     params->xbzrle_cache_size = s->parameters.xbzrle_cache_size;
     params->has_max_postcopy_bandwidth = true;
@@ -1158,14 +1155,6 @@ static bool migrate_params_check(MigrationParameters *params, Error **errp)
                    "is invalid, it should be in the range of 1 to 255");
         return false;
     }
-
-    if (params->has_x_multifd_page_count &&
-        (params->x_multifd_page_count < 1 ||
-         params->x_multifd_page_count > 10000)) {
-        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
-                   "multifd_page_count",
-                   "is invalid, it should be in the range of 1 to 10000");
-        return false;
-    }
 
     if (params->has_xbzrle_cache_size &&
         (params->xbzrle_cache_size < qemu_target_page_size() ||
@@ -1277,9 +1266,6 @@ static void migrate_params_test_apply(MigrateSetParameters *params,
     if (params->has_x_multifd_channels) {
         dest->x_multifd_channels = params->x_multifd_channels;
     }
-    if (params->has_x_multifd_page_count) {
-        dest->x_multifd_page_count = params->x_multifd_page_count;
-    }
     if (params->has_xbzrle_cache_size) {
         dest->xbzrle_cache_size = params->xbzrle_cache_size;
     }
@@ -1370,9 +1356,6 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
     if (params->has_x_multifd_channels) {
         s->parameters.x_multifd_channels = params->x_multifd_channels;
     }
-    if (params->has_x_multifd_page_count) {
-        s->parameters.x_multifd_page_count = params->x_multifd_page_count;
-    }
     if (params->has_xbzrle_cache_size) {
         s->parameters.xbzrle_cache_size = params->xbzrle_cache_size;
         xbzrle_cache_resize(params->xbzrle_cache_size, errp);
@@ -2152,15 +2135,6 @@ int migrate_multifd_channels(void)
     return s->parameters.x_multifd_channels;
 }
 
-int migrate_multifd_page_count(void)
-{
-    MigrationState *s;
-
-    s = migrate_get_current();
-
-    return s->parameters.x_multifd_page_count;
-}
-
 int migrate_use_xbzrle(void)
 {
     MigrationState *s;
@@ -3403,9 +3377,6 @@ static Property migration_properties[] = {
     DEFINE_PROP_UINT8("x-multifd-channels", MigrationState,
                       parameters.x_multifd_channels,
                       DEFAULT_MIGRATE_MULTIFD_CHANNELS),
-    DEFINE_PROP_UINT32("x-multifd-page-count", MigrationState,
-                       parameters.x_multifd_page_count,
-                       DEFAULT_MIGRATE_MULTIFD_PAGE_COUNT),
     DEFINE_PROP_SIZE("xbzrle-cache-size", MigrationState,
                      parameters.xbzrle_cache_size,
                      DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE),
@@ -3495,7 +3466,6 @@ static void migration_instance_init(Object *obj)
     params->has_x_checkpoint_delay = true;
     params->has_block_incremental = true;
     params->has_x_multifd_channels = true;
-    params->has_x_multifd_page_count = true;
     params->has_xbzrle_cache_size = true;
     params->has_max_postcopy_bandwidth = true;
     params->has_max_cpu_throttle = true;
......
@@ -274,7 +274,6 @@ bool migrate_auto_converge(void);
 bool migrate_use_multifd(void);
 bool migrate_pause_before_switchover(void);
 int migrate_multifd_channels(void);
-int migrate_multifd_page_count(void);
 int migrate_use_xbzrle(void);
 int64_t migrate_xbzrle_cache_size(void);
......
@@ -583,6 +583,9 @@ exit:
 
 #define MULTIFD_FLAG_SYNC (1 << 0)
 
+/* This value needs to be a multiple of qemu_target_page_size() */
+#define MULTIFD_PACKET_SIZE (64 * 1024)
+
 typedef struct {
     uint32_t magic;
     uint32_t version;
@@ -783,12 +786,13 @@ static void multifd_pages_clear(MultiFDPages_t *pages)
 static void multifd_send_fill_packet(MultiFDSendParams *p)
 {
     MultiFDPacket_t *packet = p->packet;
+    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
     int i;
 
     packet->magic = cpu_to_be32(MULTIFD_MAGIC);
     packet->version = cpu_to_be32(MULTIFD_VERSION);
     packet->flags = cpu_to_be32(p->flags);
-    packet->pages_alloc = cpu_to_be32(migrate_multifd_page_count());
+    packet->pages_alloc = cpu_to_be32(page_count);
     packet->pages_used = cpu_to_be32(p->pages->used);
     packet->next_packet_size = cpu_to_be32(p->next_packet_size);
     packet->packet_num = cpu_to_be64(p->packet_num);
@@ -805,6 +809,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
 static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
 {
     MultiFDPacket_t *packet = p->packet;
+    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
     RAMBlock *block;
     int i;
@@ -827,10 +832,10 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
     p->flags = be32_to_cpu(packet->flags);
 
     packet->pages_alloc = be32_to_cpu(packet->pages_alloc);
-    if (packet->pages_alloc > migrate_multifd_page_count()) {
+    if (packet->pages_alloc > page_count) {
         error_setg(errp, "multifd: received packet "
                    "with size %d and expected maximum size %d",
-                   packet->pages_alloc, migrate_multifd_page_count()) ;
+                   packet->pages_alloc, page_count) ;
         return -1;
     }
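Read in isolation, the bound check above amounts to the following hedged sketch (check_pages_alloc() is a hypothetical standalone helper; QEMU reports the failure via error_setg() rather than stderr):

    /* Sketch of the receive-side sanity check: a packet advertising more
     * pages than fit in MULTIFD_PACKET_SIZE is rejected. */
    #include <stdint.h>
    #include <stdio.h>

    #define MULTIFD_PACKET_SIZE (64 * 1024)

    static int check_pages_alloc(uint32_t pages_alloc, uint32_t target_page_size)
    {
        uint32_t page_count = MULTIFD_PACKET_SIZE / target_page_size;

        if (pages_alloc > page_count) {
            fprintf(stderr, "multifd: received packet with size %u "
                    "and expected maximum size %u\n",
                    (unsigned)pages_alloc, (unsigned)page_count);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_pages_alloc(16, 4096)); /* 0: exactly fills 64 KiB */
        printf("%d\n", check_pages_alloc(32, 4096)); /* -1: exceeds the budget */
        return 0;
    }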
@@ -1162,7 +1167,7 @@ static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
 int multifd_save_setup(void)
 {
     int thread_count;
-    uint32_t page_count = migrate_multifd_page_count();
+    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
     uint8_t i;
 
     if (!migrate_use_multifd()) {
@@ -1362,7 +1367,7 @@ static void *multifd_recv_thread(void *opaque)
 int multifd_load_setup(void)
 {
     int thread_count;
-    uint32_t page_count = migrate_multifd_page_count();
+    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
     uint8_t i;
 
     if (!migrate_use_multifd()) {
......
@@ -562,9 +562,6 @@
 #                     number of sockets used for migration.  The
 #                     default value is 2 (since 2.11)
 #
-# @x-multifd-page-count: Number of pages sent together to a thread.
-#                        The default value is 16 (since 2.11)
-#
 # @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
 #                     needs to be a multiple of the target page size
 #                     and a power of 2
@@ -587,7 +584,7 @@
            'cpu-throttle-initial', 'cpu-throttle-increment',
            'tls-creds', 'tls-hostname', 'max-bandwidth',
            'downtime-limit', 'x-checkpoint-delay', 'block-incremental',
-           'x-multifd-channels', 'x-multifd-page-count',
+           'x-multifd-channels',
            'xbzrle-cache-size', 'max-postcopy-bandwidth',
            'max-cpu-throttle' ] }
@@ -667,9 +664,6 @@
 #                     number of sockets used for migration.  The
 #                     default value is 2 (since 2.11)
 #
-# @x-multifd-page-count: Number of pages sent together to a thread.
-#                        The default value is 16 (since 2.11)
-#
 # @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
 #                     needs to be a multiple of the target page size
 #                     and a power of 2
@@ -704,7 +698,6 @@
            '*x-checkpoint-delay': 'int',
            '*block-incremental': 'bool',
            '*x-multifd-channels': 'int',
-           '*x-multifd-page-count': 'int',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle': 'int' } }
@@ -800,9 +793,6 @@
 #                     number of sockets used for migration.
 #                     The default value is 2 (since 2.11)
 #
-# @x-multifd-page-count: Number of pages sent together to a thread.
-#                        The default value is 16 (since 2.11)
-#
 # @xbzrle-cache-size: cache size to be used by XBZRLE migration.  It
 #                     needs to be a multiple of the target page size
 #                     and a power of 2
@@ -836,7 +826,6 @@
            '*x-checkpoint-delay': 'uint32',
            '*block-incremental': 'bool' ,
            '*x-multifd-channels': 'uint8',
-           '*x-multifd-page-count': 'uint32',
            '*xbzrle-cache-size': 'size',
            '*max-postcopy-bandwidth': 'size',
            '*max-cpu-throttle':'uint8'} }
......